From c121b4c2ce782b31438cc5182f97d49d575d4b18 Mon Sep 17 00:00:00 2001 From: Kenjiro Nakayama Date: Mon, 25 Sep 2017 11:44:30 +0900 Subject: [PATCH 01/68] iscsi: Use centos image for the base image --- iscsi/targetd/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/iscsi/targetd/Dockerfile b/iscsi/targetd/Dockerfile index 0a7d44d5a06..b880af2c934 100644 --- a/iscsi/targetd/Dockerfile +++ b/iscsi/targetd/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM registry.access.redhat.com/rhel7-atomic:latest +FROM centos:7 LABEL authors="Raffaele Spazzoli " ADD iscsi-controller /iscsi-controller ENTRYPOINT ["/iscsi-controller"] From 84ccdd67cefc59a7d1b0f319c8c8c628bc47c0de Mon Sep 17 00:00:00 2001 From: Tomas Smetana Date: Fri, 15 Sep 2017 15:56:16 +0200 Subject: [PATCH 02/68] Snapshot: Unit test for snapshotter, makefile test target --- Makefile | 7 +- snapshot/Makefile | 7 +- .../cloudprovider/providers/aws/aws_test.go | 41 +- .../cloudprovider/providers/gce/gce_test.go | 6 +- .../snapshotter/snapshotter_test.go | 439 ++++++++++++++++++ test.sh | 1 + 6 files changed, 480 insertions(+), 21 deletions(-) create mode 100644 snapshot/pkg/controller/snapshotter/snapshotter_test.go diff --git a/Makefile b/Makefile index 96088aac636..058631b7eee 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,7 @@ all: aws/efs ceph/cephfs ceph/rbd flex gluster/block gluster/glusterfs iscsi/tar clean: clean-aws/efs clean-ceph/cephfs clean-ceph/rbd clean-flex clean-gluster/block clean-gluster/glusterfs clean-iscsi/targetd clean-local-volume/provisioner clean-local-volume/bootstrapper clean-nfs-client clean-nfs clean-snapshot clean-openstack/standalone-cinder .PHONY: clean -test: test-aws/efs test-local-volume/provisioner test-nfs +test: test-aws/efs test-local-volume/provisioner test-nfs test-snapshot .PHONY: test verify: @@ -181,6 +181,11 @@ clean-openstack/standalone-cinder: make clean .PHONY: clean-openstack/standalone-cinder +test-snapshot: + cd snapshot; \ + make test +.PHONY: test-snapshot + push-cephfs-provisioner: cd ceph/cephfs; \ make push diff --git a/snapshot/Makefile b/snapshot/Makefile index 089403cd12c..a3c8a7eb53e 100644 --- a/snapshot/Makefile +++ b/snapshot/Makefile @@ -12,13 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -.PHONY: all controller +.PHONY: all controller clean test all: controller -controller: +controller: go build -o _output/bin/snapshot-controller cmd/snapshot-controller/snapshot-controller.go go build -o _output/bin/snapshot-provisioner cmd/snapshot-pv-provisioner/snapshot-pv-provisioner.go clean: -rm -rf _output + +test: + go test `go list ./... 
| grep -v 'vendor'` diff --git a/snapshot/pkg/cloudprovider/providers/aws/aws_test.go b/snapshot/pkg/cloudprovider/providers/aws/aws_test.go index 2f4c0cb7a79..4df1bac7817 100644 --- a/snapshot/pkg/cloudprovider/providers/aws/aws_test.go +++ b/snapshot/pkg/cloudprovider/providers/aws/aws_test.go @@ -35,6 +35,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/kubelet/apis" ) const TestClusterID = "clusterid.test" @@ -285,7 +286,7 @@ func instanceMatchesFilter(instance *ec2.Instance, filter *ec2.Filter) bool { func (e *FakeEC2) DescribeInstances(request *ec2.DescribeInstancesInput) ([]*ec2.Instance, error) { matches := []*ec2.Instance{} - for _, instance := range self.aws.instances { + for _, instance := range e.aws.instances { if request.InstanceIds != nil { if instance.InstanceId == nil { glog.Warning("Instance with no instance id: ", instance) @@ -327,7 +328,7 @@ type FakeMetadata struct { func (fake *FakeMetadata) GetMetadata(key string) (string, error) { networkInterfacesPrefix := "network/interfaces/macs/" - i := self.aws.selfInstance + i := fake.aws.selfInstance if key == "placement/availability-zone" { az := "" if i.Placement != nil { @@ -346,14 +347,14 @@ func (fake *FakeMetadata) GetMetadata(key string) (string, error) { return aws.StringValue(i.PublicIpAddress), nil } else if strings.HasPrefix(key, networkInterfacesPrefix) { if key == networkInterfacesPrefix { - return strings.Join(self.aws.networkInterfacesMacs, "/\n") + "/\n", nil + return strings.Join(fake.aws.networkInterfacesMacs, "/\n") + "/\n", nil } keySplit := strings.Split(key, "/") macParam := keySplit[3] if len(keySplit) == 5 && keySplit[4] == "vpc-id" { - for i, macElem := range self.aws.networkInterfacesMacs { + for i, macElem := range fake.aws.networkInterfacesMacs { if macParam == macElem { - return self.aws.networkInterfacesVpcIDs[i], nil + return fake.aws.networkInterfacesVpcIDs[i], nil } } } @@ -383,6 +384,17 @@ func (e *FakeEC2) DeleteVolume(request *ec2.DeleteVolumeInput) (resp *ec2.Delete panic("Not implemented") } +func (e *FakeEC2) CreateSnapshot(*ec2.CreateSnapshotInput) (*ec2.Snapshot, error) { + panic("Not implemented") +} + +func (e *FakeEC2) DeleteSnapshot(*ec2.DeleteSnapshotInput) (*ec2.DeleteSnapshotOutput, error) { + panic("Not implemented") +} + +func (e *FakeEC2) DescribeSnapshots(*ec2.DescribeSnapshotsInput) ([]*ec2.Snapshot, error) { + panic("Not implemented") +} func (e *FakeEC2) DescribeSecurityGroups(request *ec2.DescribeSecurityGroupsInput) ([]*ec2.SecurityGroup, error) { panic("Not implemented") } @@ -404,8 +416,8 @@ func (e *FakeEC2) RevokeSecurityGroupIngress(*ec2.RevokeSecurityGroupIngressInpu } func (e *FakeEC2) DescribeSubnets(request *ec2.DescribeSubnetsInput) ([]*ec2.Subnet, error) { - ec2.DescribeSubnetsInput = request - return ec2.Subnets, nil + e.DescribeSubnetsInput = request + return e.Subnets, nil } func (e *FakeEC2) CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) { @@ -413,8 +425,8 @@ func (e *FakeEC2) CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error } func (e *FakeEC2) DescribeRouteTables(request *ec2.DescribeRouteTablesInput) ([]*ec2.RouteTable, error) { - ec2.DescribeRouteTablesInput = request - return ec2.RouteTables, nil + e.DescribeRouteTablesInput = request + return e.RouteTables, nil } func (e *FakeEC2) CreateRoute(request *ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error) { @@ -597,11 +609,10 @@ func TestNodeAddresses(t *testing.T) { if err3 != 
nil { t.Errorf("Should not error when instance found") } - if len(addrs3) != 5 { - t.Errorf("Should return exactly 5 NodeAddresses") + if len(addrs3) != 4 { + t.Errorf("Should return exactly 4 NodeAddresses") } testHasNodeAddress(t, addrs3, v1.NodeInternalIP, "192.168.0.1") - testHasNodeAddress(t, addrs3, v1.NodeLegacyHostIP, "192.168.0.1") testHasNodeAddress(t, addrs3, v1.NodeExternalIP, "1.2.3.4") testHasNodeAddress(t, addrs3, v1.NodeExternalDNS, "instance-same.ec2.external") testHasNodeAddress(t, addrs3, v1.NodeInternalDNS, "instance-same.ec2.internal") @@ -1082,13 +1093,13 @@ func TestGetVolumeLabels(t *testing.T) { assert.Nil(t, err, "Error creating Volume %v", err) assert.Equal(t, map[string]string{ - metav1.LabelZoneFailureDomain: "us-east-1a", - metav1.LabelZoneRegion: "us-east-1"}, labels) + apis.LabelZoneFailureDomain: "us-east-1a", + apis.LabelZoneRegion: "us-east-1"}, labels) awsServices.ec2.AssertExpectations(t) } func (ec2 *FakeELB) expectDescribeLoadBalancers(loadBalancerName string) { - self.On("DescribeLoadBalancers", &elb.DescribeLoadBalancersInput{LoadBalancerNames: []*string{aws.String(loadBalancerName)}}).Return(&elb.DescribeLoadBalancersOutput{ + ec2.On("DescribeLoadBalancers", &elb.DescribeLoadBalancersInput{LoadBalancerNames: []*string{aws.String(loadBalancerName)}}).Return(&elb.DescribeLoadBalancersOutput{ LoadBalancerDescriptions: []*elb.LoadBalancerDescription{{}}, }) } diff --git a/snapshot/pkg/cloudprovider/providers/gce/gce_test.go b/snapshot/pkg/cloudprovider/providers/gce/gce_test.go index 5bba03bf97c..87015617dc0 100644 --- a/snapshot/pkg/cloudprovider/providers/gce/gce_test.go +++ b/snapshot/pkg/cloudprovider/providers/gce/gce_test.go @@ -30,7 +30,7 @@ func TestGetRegion(t *testing.T) { if regionName != "us-central1" { t.Errorf("Unexpected region from GetGCERegion: %s", regionName) } - gce := &GCECloud{ + gce := &Cloud{ localZone: zoneName, region: regionName, } @@ -137,7 +137,7 @@ func TestScrubDNS(t *testing.T) { searchesOut: []string{"c.prj.internal.", "zone.c.prj.internal.", "google.internal.", "unexpected"}, }, } - gce := &GCECloud{} + gce := &Cloud{} for i := range tcs { n, s := gce.ScrubDNS(tcs[i].nameserversIn, tcs[i].searchesIn) if !reflect.DeepEqual(n, tcs[i].nameserversOut) { @@ -153,7 +153,7 @@ func TestCreateFirewallFails(t *testing.T) { name := "loadbalancer" region := "us-central1" desc := "description" - gce := &GCECloud{} + gce := &Cloud{} if err := gce.createFirewall(name, region, desc, nil, nil, nil); err == nil { t.Errorf("error expected when creating firewall without any tags found") } diff --git a/snapshot/pkg/controller/snapshotter/snapshotter_test.go b/snapshot/pkg/controller/snapshotter/snapshotter_test.go new file mode 100644 index 00000000000..a509d5a08cb --- /dev/null +++ b/snapshot/pkg/controller/snapshotter/snapshotter_test.go @@ -0,0 +1,439 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package snapshotter + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "testing" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" + + crdv1 "github.com/kubernetes-incubator/external-storage/snapshot/pkg/apis/crd/v1" + "github.com/kubernetes-incubator/external-storage/snapshot/pkg/cloudprovider" + "github.com/kubernetes-incubator/external-storage/snapshot/pkg/controller/cache" + "github.com/kubernetes-incubator/external-storage/snapshot/pkg/volume" +) + +// TestPlugin methods +type TestPlugin struct { + ShouldFail bool + CreateCallCount int + DeleteCallCount int + RestoreCallCount int + DescribeCallCount int + FindCallCount int + VolumeDeleteCallCount int +} + +func (tp *TestPlugin) Init(cloudprovider.Interface) { +} + +func (tp *TestPlugin) SnapshotCreate(*v1.PersistentVolume, *map[string]string) (*crdv1.VolumeSnapshotDataSource, *[]crdv1.VolumeSnapshotCondition, error) { + tp.CreateCallCount = tp.CreateCallCount + 1 + if tp.ShouldFail { + return nil, nil, fmt.Errorf("SnapshotCreate forced failure") + } + return nil, nil, nil +} + +func (tp *TestPlugin) SnapshotDelete(*crdv1.VolumeSnapshotDataSource, *v1.PersistentVolume) error { + tp.DeleteCallCount = tp.DeleteCallCount + 1 + if tp.ShouldFail { + return fmt.Errorf("SnapshotDelete forced failure") + } + return nil +} + +func (tp *TestPlugin) SnapshotRestore(*crdv1.VolumeSnapshotData, *v1.PersistentVolumeClaim, string, map[string]string) (*v1.PersistentVolumeSource, map[string]string, error) { + return nil, map[string]string{}, nil +} + +func (tp *TestPlugin) DescribeSnapshot(snapshotData *crdv1.VolumeSnapshotData) (snapConditions *[]crdv1.VolumeSnapshotCondition, isCompleted bool, err error) { + return nil, true, nil +} + +func (tp *TestPlugin) FindSnapshot(tags *map[string]string) (*crdv1.VolumeSnapshotDataSource, *[]crdv1.VolumeSnapshotCondition, error) { + return nil, nil, nil +} + +func (tp *TestPlugin) VolumeDelete(pv *v1.PersistentVolume) error { + return nil +} + +// Helper functions +type roundTripperFunc func(*http.Request) (*http.Response, error) + +func (f roundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return f(req) +} + +func dummyRoundTripper(req *http.Request) (*http.Response, error) { + return nil, nil +} + +func objBody(obj runtime.Object) io.ReadCloser { + encodedObj, err := json.Marshal(obj) + if err == nil { + return ioutil.NopCloser(bytes.NewReader(encodedObj)) + } + return ioutil.NopCloser(bytes.NewReader([]byte(""))) + +} + +func fakeVolumeSnapshotDataList() *crdv1.VolumeSnapshotDataList { + return &crdv1.VolumeSnapshotDataList{ + Metadata: metav1.ListMeta{ + ResourceVersion: "", + SelfLink: "", + }, + Items: []crdv1.VolumeSnapshotData{ + { + Metadata: metav1.ObjectMeta{ + Name: "snapshotdata-test-1", + Namespace: "", + CreationTimestamp: metav1.Time{}, + }, + Spec: crdv1.VolumeSnapshotDataSpec{ + VolumeSnapshotDataSource: crdv1.VolumeSnapshotDataSource{ + HostPath: &crdv1.HostPathVolumeSnapshotSource{ + Path: "/fake/file", + }, + }, + PersistentVolumeRef: &v1.ObjectReference{ + Kind: "PersistentVolume", + Name: "fake-pv-1", + }, + VolumeSnapshotRef: &v1.ObjectReference{ + Kind: "VolumeSnapshot", + Name: "fake-snapshot-1", + }, + }, + Status: crdv1.VolumeSnapshotDataStatus{ + Conditions: []crdv1.VolumeSnapshotDataCondition{ + { + LastTransitionTime: metav1.Time{}, + Status: 
v1.ConditionTrue, + Type: crdv1.VolumeSnapshotDataConditionReady, + }, + }, + }, + }, + { + Metadata: metav1.ObjectMeta{ + Name: "snapshotdata-test-2", + Namespace: "", + CreationTimestamp: metav1.Time{}, + }, + Spec: crdv1.VolumeSnapshotDataSpec{ + VolumeSnapshotDataSource: crdv1.VolumeSnapshotDataSource{ + HostPath: &crdv1.HostPathVolumeSnapshotSource{ + Path: "/fake/file2", + }, + }, + PersistentVolumeRef: &v1.ObjectReference{ + Kind: "PersistentVolume", + Name: "fake-pv-2", + }, + VolumeSnapshotRef: &v1.ObjectReference{ + Kind: "VolumeSnapshot", + Name: "fake-snapshot-2", + }, + }, + Status: crdv1.VolumeSnapshotDataStatus{ + Conditions: []crdv1.VolumeSnapshotDataCondition{ + { + LastTransitionTime: metav1.Time{}, + Status: v1.ConditionTrue, + Type: crdv1.VolumeSnapshotDataConditionReady, + }, + }, + }, + }, + }, + } +} + +func fakeVolumeSnapshotList() *crdv1.VolumeSnapshotList { + return &crdv1.VolumeSnapshotList{ + Metadata: metav1.ListMeta{ + ResourceVersion: "", + SelfLink: "", + }, + Items: []crdv1.VolumeSnapshot{ + { + Metadata: metav1.ObjectMeta{ + Name: "snapshot-test-1", + Namespace: "", + CreationTimestamp: metav1.Time{}, + }, + Spec: crdv1.VolumeSnapshotSpec{ + PersistentVolumeClaimName: "fake-pvc-1", + }, + }, + }, + } +} + +func fakeNewVolumeSnapshot() *crdv1.VolumeSnapshot { + return &crdv1.VolumeSnapshot{ + Metadata: metav1.ObjectMeta{ + Name: "new-snapshot-test-1", + Namespace: "default", + CreationTimestamp: metav1.Time{}, + }, + Spec: crdv1.VolumeSnapshotSpec{ + PersistentVolumeClaimName: "fake-pvc-1", + }, + } +} + +func fakePV() *v1.PersistentVolume { + return &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-pv-1", + Namespace: "", + CreationTimestamp: metav1.Time{}, + }, + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + HostPath: &v1.HostPathVolumeSource{ + Path: "/fake/path", + }, + }, + }, + } +} + +func fakePVC() *v1.PersistentVolumeClaim { + return &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-pvc-1", + Namespace: "default", + CreationTimestamp: metav1.Time{}, + }, + Spec: v1.PersistentVolumeClaimSpec{ + VolumeName: "fake-pv-1", + }, + Status: v1.PersistentVolumeClaimStatus{ + Phase: "Bound", + }, + } +} + +// Prepare testing scheme and REST client that uses the roundTripper for http transport +func fakeSchemeAndClient(roundTripper func(*http.Request) (*http.Response, error)) (*runtime.Scheme, *rest.RESTClient, error) { + scheme := runtime.NewScheme() + if err := crdv1.AddToScheme(scheme); err != nil { + return nil, nil, err + } + config := rest.Config{ + APIPath: "/apis", + ContentConfig: rest.ContentConfig{ + GroupVersion: &crdv1.SchemeGroupVersion, + ContentType: runtime.ContentTypeJSON, + NegotiatedSerializer: serializer.DirectCodecFactory{CodecFactory: serializer.NewCodecFactory(scheme)}, + }, + Transport: roundTripperFunc(roundTripper), + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, nil, err + } + + return scheme, client, nil +} + +// Tests +func Test_NewVolumeSnapshotter(t *testing.T) { + var tp *TestPlugin = &TestPlugin{} + + clientset := fake.NewSimpleClientset() + asw := cache.NewActualStateOfWorld() + plugins := map[string]volume.Plugin{"hostPath": tp} + scheme, client, err := fakeSchemeAndClient(dummyRoundTripper) + if err != nil { + t.Errorf("Failed to create test client: %v", err) + } + + vs := NewVolumeSnapshotter(client, scheme, clientset, asw, &plugins) + if vs == nil { + t.Errorf("Test failed: could not create volume 
snapshotter") + } +} + +func Test_getSnapshotDataFromSnapshotName(t *testing.T) { + var tp *TestPlugin = &TestPlugin{} + + clientset := fake.NewSimpleClientset() + asw := cache.NewActualStateOfWorld() + plugins := map[string]volume.Plugin{"hostPath": tp} + scheme, client, err := fakeSchemeAndClient(func(req *http.Request) (*http.Response, error) { + header := http.Header{} + header.Set("Content-Type", runtime.ContentTypeJSON) + return &http.Response{ + StatusCode: http.StatusOK, + Header: header, + Body: objBody(fakeVolumeSnapshotDataList()), + }, nil + }) + if err != nil { + t.Errorf("Failed to create test client: %v", err) + } + + vsObj := NewVolumeSnapshotter(client, scheme, clientset, asw, &plugins) + if vsObj == nil { + t.Errorf("Test failed: could not create volume snapshotter") + } + + vs := vsObj.(*volumeSnapshotter) + + snapData := vs.getSnapshotDataFromSnapshotName("fake-snapshot-1") + if snapData == nil { + t.Errorf("Failure: did not find VolumeSnpshotData by VolumeSnapshot name") + } + if snapData.Metadata.Name != "snapshotdata-test-1" { + t.Errorf("Failure: found incorrect VolumeSnpshotData for VumeSnapshot") + } +} + +func Test_takeSnapshot(t *testing.T) { + var tp *TestPlugin = &TestPlugin{} + + clientset := fake.NewSimpleClientset() + asw := cache.NewActualStateOfWorld() + plugins := map[string]volume.Plugin{"hostPath": tp} + scheme, client, err := fakeSchemeAndClient(dummyRoundTripper) + if err != nil { + t.Errorf("Failed to create test client: %v", err) + } + + vsObj := NewVolumeSnapshotter(client, scheme, clientset, asw, &plugins) + if vsObj == nil { + t.Errorf("Test failed: could not create volume snapshotter") + } + vs := vsObj.(*volumeSnapshotter) + + pv := fakePV() + tags := map[string]string{ + "tag1": "tag value 1", + "tag2": "tag value 2", + } + _, _, err = vs.takeSnapshot(pv, &tags) + if err != nil { + t.Errorf("Test failed, unexpected error: %v", err) + } + tp.ShouldFail = true + _, _, err = vs.takeSnapshot(pv, &tags) + if err != nil { + t.Errorf("Test failed, unexpected error: %v", err) + } + + if tp.CreateCallCount != 2 { + t.Errorf("Test failed, expected 2 CreateSnapshot calls in plugin, got %d", tp.CreateCallCount) + } +} + +func Test_deleteSnapshot(t *testing.T) { + var tp *TestPlugin = &TestPlugin{} + + clientset := fake.NewSimpleClientset() + asw := cache.NewActualStateOfWorld() + plugins := map[string]volume.Plugin{"hostPath": tp} + scheme, client, err := fakeSchemeAndClient(dummyRoundTripper) + if err != nil { + t.Errorf("Failed to create test client: %v", err) + } + + vsObj := NewVolumeSnapshotter(client, scheme, clientset, asw, &plugins) + if vsObj == nil { + t.Errorf("Test failed: could not create volume snapshotter") + } + vs := vsObj.(*volumeSnapshotter) + + snapDataList := fakeVolumeSnapshotDataList() + err = vs.deleteSnapshot(&snapDataList.Items[0].Spec) + if err != nil { + t.Errorf("Test failed, unexpected error: %v", err) + } + tp.ShouldFail = true + err = vs.deleteSnapshot(&snapDataList.Items[0].Spec) + if err == nil { + t.Errorf("Test failed, expected error got nil") + } + + if tp.DeleteCallCount != 2 { + t.Errorf("Test failed, expected 2 DeleteSnapshot calls in plugin, got %d", tp.CreateCallCount) + } +} + +func Test_createSnapshotData(t *testing.T) { + var tp *TestPlugin = &TestPlugin{} + + clientset := fake.NewSimpleClientset(fakePVC(), fakePV()) + asw := cache.NewActualStateOfWorld() + plugins := map[string]volume.Plugin{"hostPath": tp} + scheme, client, err := fakeSchemeAndClient(func(req *http.Request) (*http.Response, error) { + header 
:= http.Header{} + header.Set("Content-Type", runtime.ContentTypeJSON) + retObj, _ := req.GetBody() // Just return the VolumeSnapshotData as is + return &http.Response{ + StatusCode: http.StatusOK, + Header: header, + Body: retObj, + }, nil + }) + if err != nil { + t.Errorf("Failed to create test client: %v", err) + } + + vsObj := NewVolumeSnapshotter(client, scheme, clientset, asw, &plugins) + if vsObj == nil { + t.Errorf("Test failed: could not create volume snapshotter") + } + vs := vsObj.(*volumeSnapshotter) + //func (vs *volumeSnapshotter) createVolumeSnapshotData(snapshotName string, snapshot *crdv1.VolumeSnapshot, snapshotDataSource *crdv1.VolumeSnapshotDataSource, snapStatus *[]crdv1.VolumeSnapshotCondition) (*crdv1.VolumeSnapshotData, error) { + + snapDataSource := crdv1.VolumeSnapshotDataSource{ + HostPath: &crdv1.HostPathVolumeSnapshotSource{ + Path: "/fake/file", + }, + } + snapConditions := []crdv1.VolumeSnapshotCondition{ + { + LastTransitionTime: metav1.Time{}, + Status: v1.ConditionTrue, + Type: crdv1.VolumeSnapshotConditionReady, + }, + } + retData, err := vs.createVolumeSnapshotData("default/new-snapshot-test-1", fakeNewVolumeSnapshot(), &snapDataSource, &snapConditions) + if err != nil { + t.Errorf("Test failed, unexpected error: %v", err) + } + if retData == nil { + t.Errorf("Test failed: faailed to create VolumeSnapshotData") + } +} diff --git a/test.sh b/test.sh index 8eb74c8f66e..c49573e1d08 100755 --- a/test.sh +++ b/test.sh @@ -93,6 +93,7 @@ elif [ "$TEST_SUITE" = "everything-else" ]; then make test-iscsi/targetd make nfs-client make snapshot + make test-snapshot elif [ "$TEST_SUITE" = "local-volume" ]; then make local-volume/provisioner make test-local-volume/provisioner From 3fbfaf0b3e09293d9d80063d6a2ebe293514aa44 Mon Sep 17 00:00:00 2001 From: Kenjiro Nakayama Date: Mon, 25 Sep 2017 03:23:02 +0000 Subject: [PATCH 03/68] iscsi: Support master and kubeconfig option to run iscsi provisioner locally --- iscsi/targetd/README.md | 8 ++++++++ iscsi/targetd/cmd/start.go | 18 +++++++++++++++--- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/iscsi/targetd/README.md b/iscsi/targetd/README.md index dff8a6ed62c..0ffcaf41f5b 100644 --- a/iscsi/targetd/README.md +++ b/iscsi/targetd/README.md @@ -241,6 +241,14 @@ oc secret new-basicauth targetd-account --username=admin --password=ciao oc create -f https://raw.githubusercontent.com/kubernetes-incubator/external-storage/master/iscsi/targetd/openshift/iscsi-provisioner-dc.yaml ``` +### Start iscsi provisioner as docker container. + +Alternatively, you can start a provisioner as a container locally. 
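The `--kubeconfig` and `--master` flags passed in the container example below are wired up by the `cmd/start.go` change later in this patch: when either flag is set, the provisioner builds its client configuration from those values instead of assuming it runs in-cluster. A minimal sketch of that selection logic, using the same client-go `clientcmd` and `rest` packages as the patch (the package and helper names here are illustrative only):

```go
package provisionerconfig

import (
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// buildKubeConfig mirrors the selection added to cmd/start.go: an explicit
// --master/--kubeconfig pair takes precedence over the in-cluster config.
func buildKubeConfig(master, kubeconfig string) (*rest.Config, error) {
	if master != "" || kubeconfig != "" {
		// Running outside the cluster, e.g. the docker invocation below.
		return clientcmd.BuildConfigFromFlags(master, kubeconfig)
	}
	// Default: in-cluster config from the mounted service account.
	return rest.InClusterConfig()
}
```

With either flag supplied, the provisioner can run anywhere it can reach the API server and targetd, which is what the container invocation below relies on.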
+ +```bash +docker run -ti -v /root/.kube:/kube -v /var/run/kubernetes:/var/run/kubernetes --privileged --net=host quay.io/external_storage/iscsi-controller:latest start --kubeconfig=/kube/config --master=http://127.0.0.1:8080 --log-level=debug --targetd-address=192.168.99.100 --targetd-password=ciao --targetd-username=admin +``` + ### Create a storage class storage classes should look like the following diff --git a/iscsi/targetd/cmd/start.go b/iscsi/targetd/cmd/start.go index b8a61e121d8..6d2c4c22032 100644 --- a/iscsi/targetd/cmd/start.go +++ b/iscsi/targetd/cmd/start.go @@ -18,6 +18,7 @@ package cmd import ( "fmt" + "github.com/Sirupsen/logrus" "github.com/kubernetes-incubator/external-storage/iscsi/targetd/provisioner" "github.com/kubernetes-incubator/external-storage/lib/controller" @@ -26,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" ) var log = logrus.New() @@ -42,12 +44,18 @@ This application is a tool to generate the needed files to quickly create a Cobra application.`, Run: func(cmd *cobra.Command, args []string) { initLog() - log.Debugln("start called") - + var config *rest.Config + var err error + master := viper.GetString("master") + kubeconfig := viper.GetString("kubeconfig") // creates the in-cluster config log.Debugln("creating in cluster default kube client config") - config, err := rest.InClusterConfig() + if master != "" || kubeconfig != "" { + config, err = clientcmd.BuildConfigFromFlags(master, kubeconfig) + } else { + config, err = rest.InClusterConfig() + } if err != nil { log.Fatalln(err) } @@ -126,6 +134,10 @@ func init() { startcontrollerCmd.Flags().String("default-fs", "xfs", "filesystem to use when not specified") viper.BindPFlag("default-fs", startcontrollerCmd.Flags().Lookup("default-fs")) + startcontrollerCmd.Flags().String("master", "", "Master URL") + viper.BindPFlag("master", startcontrollerCmd.Flags().Lookup("master")) + startcontrollerCmd.Flags().String("kubeconfig", "", "Absolute path to the kubeconfig") + viper.BindPFlag("kubeconfig", startcontrollerCmd.Flags().Lookup("kubeconfig")) // Here you will define your flags and configuration settings. // Cobra supports Persistent Flags which will work for this command From d44eec15111bd8abcb712101ffeb2c73c9c93ba5 Mon Sep 17 00:00:00 2001 From: Tomas Smetana Date: Tue, 26 Sep 2017 20:50:16 +0200 Subject: [PATCH 04/68] Update dependencies for snapshot tests Added github.com/davecgh/go-spew and golang.org/x/sys to glide.yaml. 
Run glide up -v && glide-vc --use-lock-file --- glide.lock | 21 +- glide.yaml | 8 + vendor/cloud.google.com/go/.travis.yml | 9 +- vendor/cloud.google.com/go/CONTRIBUTING.md | 34 +- vendor/cloud.google.com/go/CONTRIBUTORS | 3 - vendor/cloud.google.com/go/MIGRATION.md | 54 - vendor/cloud.google.com/go/README.md | 392 +-- vendor/cloud.google.com/go/appveyor.yml | 8 +- .../cloud.google.com/go/authexample_test.go | 35 +- .../go/compute/metadata/metadata.go | 35 +- vendor/cloud.google.com/go/internal/cloud.go | 64 + vendor/cloud.google.com/go/key.json.enc | Bin 2432 -> 1248 bytes vendor/cloud.google.com/go/license_test.go | 2 +- vendor/cloud.google.com/go/old-news.md | 451 --- vendor/cloud.google.com/go/run-tests.sh | 88 - vendor/github.com/Sirupsen/logrus/.travis.yml | 6 +- .../github.com/Sirupsen/logrus/CHANGELOG.md | 7 + vendor/github.com/Sirupsen/logrus/README.md | 129 +- vendor/github.com/Sirupsen/logrus/entry.go | 4 + vendor/github.com/Sirupsen/logrus/exported.go | 6 + .../github.com/Sirupsen/logrus/formatter.go | 4 + .../Sirupsen/logrus/json_formatter.go | 22 +- .../Sirupsen/logrus/json_formatter_test.go | 120 + vendor/github.com/Sirupsen/logrus/logger.go | 84 +- .../github.com/Sirupsen/logrus/logrus_test.go | 24 +- .../Sirupsen/logrus/terminal_notwindows.go | 2 +- .../Sirupsen/logrus/terminal_openbsd.go | 7 + .../Sirupsen/logrus/text_formatter.go | 51 +- .../Sirupsen/logrus/text_formatter_test.go | 30 +- vendor/github.com/Sirupsen/logrus/writer.go | 31 + vendor/github.com/aws/aws-sdk-go/CHANGELOG.md | 40 + .../aws/aws-sdk-go/aws/endpoints/defaults.go | 5 + .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/ec2/api.go | 1103 ++++++- vendor/github.com/davecgh/go-spew/.travis.yml | 9 +- vendor/github.com/davecgh/go-spew/LICENSE | 2 + vendor/github.com/davecgh/go-spew/README.md | 10 +- .../github.com/davecgh/go-spew/spew/bypass.go | 7 +- .../davecgh/go-spew/spew/bypasssafe.go | 7 +- .../github.com/davecgh/go-spew/spew/config.go | 11 +- .../github.com/davecgh/go-spew/spew/dump.go | 6 +- .../davecgh/go-spew/spew/dumpcgo_test.go | 3 +- .../go-spew/spew/internalunsafe_test.go | 7 +- .../davecgh/go-spew/spew/spew_test.go | 11 + vendor/github.com/go-ini/ini/.gitignore | 1 - vendor/github.com/go-ini/ini/.travis.yml | 13 - vendor/github.com/go-ini/ini/Makefile | 12 - vendor/github.com/go-ini/ini/README.md | 198 +- vendor/github.com/go-ini/ini/README_ZH.md | 202 +- vendor/github.com/go-ini/ini/error.go | 32 - vendor/github.com/go-ini/ini/ini.go | 1083 +++++-- vendor/github.com/go-ini/ini/ini_test.go | 599 ++-- vendor/github.com/go-ini/ini/key.go | 699 ---- vendor/github.com/go-ini/ini/key_test.go | 573 ---- vendor/github.com/go-ini/ini/parser.go | 361 --- vendor/github.com/go-ini/ini/parser_test.go | 42 - vendor/github.com/go-ini/ini/section.go | 248 -- vendor/github.com/go-ini/ini/section_test.go | 75 - vendor/github.com/go-ini/ini/struct.go | 278 +- vendor/github.com/go-ini/ini/struct_test.go | 133 +- .../prometheus/client_golang/.travis.yml | 6 +- .../prometheus/client_golang/AUTHORS.md | 18 + .../prometheus/client_golang/CHANGELOG.md | 23 - .../prometheus/client_golang/CONTRIBUTING.md | 6 +- .../prometheus/client_golang/MAINTAINERS.md | 1 - .../prometheus/client_golang/NOTICE | 5 + .../prometheus/client_golang/README.md | 6 +- .../prometheus/client_golang/VERSION | 2 +- .../client_golang/prometheus/README.md | 54 +- .../prometheus/benchmark_test.go | 34 +- .../client_golang/prometheus/collector.go | 52 +- .../client_golang/prometheus/counter.go | 92 +- 
.../client_golang/prometheus/counter_test.go | 56 - .../client_golang/prometheus/desc.go | 78 +- .../client_golang/prometheus/desc_test.go | 17 - .../client_golang/prometheus/doc.go | 197 +- .../prometheus/example_clustermanager_test.go | 96 +- .../prometheus/example_memstats_test.go | 87 + .../prometheus/example_selfcollector_test.go | 69 + .../prometheus/example_timer_complex_test.go | 71 - .../prometheus/example_timer_gauge_test.go | 48 - .../prometheus/example_timer_test.go | 40 - .../client_golang/prometheus/examples_test.go | 317 +- .../{expvar_collector.go => expvar.go} | 32 +- ...xpvar_collector_test.go => expvar_test.go} | 2 +- .../client_golang/prometheus/fnv.go | 29 - .../client_golang/prometheus/gauge.go | 80 +- .../client_golang/prometheus/gauge_test.go | 20 - .../client_golang/prometheus/go_collector.go | 55 +- .../prometheus/go_collector_test.go | 52 +- .../client_golang/prometheus/histogram.go | 95 +- .../prometheus/histogram_test.go | 26 +- .../client_golang/prometheus/http.go | 243 +- .../client_golang/prometheus/http_test.go | 43 +- .../client_golang/prometheus/labels.go | 57 - .../client_golang/prometheus/metric.go | 34 +- .../client_golang/prometheus/observer.go | 50 - .../prometheus/process_collector.go | 108 +- .../prometheus/process_collector_test.go | 60 +- .../client_golang/prometheus/push.go | 65 + .../client_golang/prometheus/registry.go | 938 +++--- .../client_golang/prometheus/registry_test.go | 183 +- .../client_golang/prometheus/summary.go | 127 +- .../client_golang/prometheus/summary_test.go | 57 +- .../client_golang/prometheus/timer.go | 51 - .../client_golang/prometheus/timer_test.go | 152 - .../client_golang/prometheus/untyped.go | 109 +- .../client_golang/prometheus/value.go | 22 +- .../client_golang/prometheus/value_test.go | 43 - .../client_golang/prometheus/vec.go | 352 +- .../client_golang/prometheus/vec_test.go | 265 +- vendor/github.com/spf13/viper/.gitignore | 1 - vendor/github.com/spf13/viper/.travis.yml | 6 +- vendor/github.com/spf13/viper/README.md | 85 +- vendor/github.com/spf13/viper/flags_test.go | 3 +- .../github.com/spf13/viper/overrides_test.go | 173 - vendor/github.com/spf13/viper/util.go | 118 +- vendor/github.com/spf13/viper/util_test.go | 54 - vendor/github.com/spf13/viper/viper.go | 824 ++--- vendor/github.com/spf13/viper/viper_test.go | 390 +-- .../golang.org/x/sys/unix/asm_openbsd_arm.s | 29 - vendor/golang.org/x/sys/unix/cap_freebsd.go | 195 -- vendor/golang.org/x/sys/unix/creds_test.go | 183 +- vendor/golang.org/x/sys/unix/dev_darwin.go | 24 - .../golang.org/x/sys/unix/dev_darwin_test.go | 49 - vendor/golang.org/x/sys/unix/dev_dragonfly.go | 30 - .../x/sys/unix/dev_dragonfly_test.go | 48 - vendor/golang.org/x/sys/unix/dev_freebsd.go | 30 - vendor/golang.org/x/sys/unix/dev_linux.go | 42 - .../golang.org/x/sys/unix/dev_linux_test.go | 51 - vendor/golang.org/x/sys/unix/dev_netbsd.go | 29 - .../golang.org/x/sys/unix/dev_netbsd_test.go | 50 - vendor/golang.org/x/sys/unix/dev_openbsd.go | 29 - .../golang.org/x/sys/unix/dev_openbsd_test.go | 52 - .../golang.org/x/sys/unix/dev_solaris_test.go | 49 - .../x/sys/unix/errors_freebsd_386.go | 227 -- .../x/sys/unix/errors_freebsd_amd64.go | 227 -- .../x/sys/unix/errors_freebsd_arm.go | 226 -- vendor/golang.org/x/sys/unix/file_unix.go | 27 - vendor/golang.org/x/sys/unix/flock.go | 2 + .../x/sys/unix/gccgo_linux_sparc64.go | 20 + vendor/golang.org/x/sys/unix/mkall.sh | 22 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 29 +- .../golang.org/x/sys/unix/mksysnum_freebsd.pl | 13 + 
.../golang.org/x/sys/unix/mksysnum_netbsd.pl | 2 +- .../golang.org/x/sys/unix/mmap_unix_test.go | 12 - vendor/golang.org/x/sys/unix/pagesize_unix.go | 15 - vendor/golang.org/x/sys/unix/syscall_bsd.go | 31 +- .../golang.org/x/sys/unix/syscall_darwin.go | 65 +- .../x/sys/unix/syscall_darwin_386.go | 2 + .../x/sys/unix/syscall_darwin_amd64.go | 4 + .../x/sys/unix/syscall_darwin_arm.go | 2 + .../x/sys/unix/syscall_darwin_arm64.go | 2 + .../x/sys/unix/syscall_dragonfly.go | 12 +- .../x/sys/unix/syscall_dragonfly_amd64.go | 2 + .../golang.org/x/sys/unix/syscall_freebsd.go | 64 +- .../x/sys/unix/syscall_freebsd_386.go | 2 + .../x/sys/unix/syscall_freebsd_amd64.go | 2 + .../x/sys/unix/syscall_freebsd_arm.go | 2 + .../x/sys/unix/syscall_freebsd_test.go | 279 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 91 +- .../x/sys/unix/syscall_linux_386.go | 2 + .../x/sys/unix/syscall_linux_amd64.go | 2 + .../x/sys/unix/syscall_linux_arm.go | 2 + .../x/sys/unix/syscall_linux_arm64.go | 2 + .../x/sys/unix/syscall_linux_mips64x.go | 2 + .../x/sys/unix/syscall_linux_mipsx.go | 2 + .../x/sys/unix/syscall_linux_ppc64x.go | 2 + .../x/sys/unix/syscall_linux_s390x.go | 2 + .../x/sys/unix/syscall_linux_sparc64.go | 20 + .../x/sys/unix/syscall_linux_test.go | 41 - .../golang.org/x/sys/unix/syscall_netbsd.go | 6 +- .../x/sys/unix/syscall_netbsd_386.go | 2 + .../x/sys/unix/syscall_netbsd_amd64.go | 2 + .../x/sys/unix/syscall_netbsd_arm.go | 2 + .../golang.org/x/sys/unix/syscall_openbsd.go | 7 +- .../x/sys/unix/syscall_openbsd_386.go | 2 + .../x/sys/unix/syscall_openbsd_amd64.go | 2 + .../x/sys/unix/syscall_openbsd_arm.go | 40 - .../golang.org/x/sys/unix/syscall_solaris.go | 54 +- vendor/golang.org/x/sys/unix/types_darwin.go | 4 - .../golang.org/x/sys/unix/types_dragonfly.go | 7 - vendor/golang.org/x/sys/unix/types_freebsd.go | 19 - vendor/golang.org/x/sys/unix/types_netbsd.go | 7 - vendor/golang.org/x/sys/unix/types_openbsd.go | 7 - vendor/golang.org/x/sys/unix/types_solaris.go | 4 + .../x/sys/unix/zerrors_darwin_386.go | 103 +- .../x/sys/unix/zerrors_darwin_amd64.go | 103 +- .../x/sys/unix/zerrors_darwin_arm.go | 392 +-- .../x/sys/unix/zerrors_darwin_arm64.go | 103 +- .../x/sys/unix/zerrors_freebsd_386.go | 2865 ++++++++-------- .../x/sys/unix/zerrors_freebsd_amd64.go | 2871 +++++++++-------- .../x/sys/unix/zerrors_freebsd_arm.go | 2860 ++++++++-------- .../x/sys/unix/zerrors_linux_386.go | 115 +- .../x/sys/unix/zerrors_linux_amd64.go | 115 +- .../x/sys/unix/zerrors_linux_arm.go | 115 +- .../x/sys/unix/zerrors_linux_arm64.go | 116 +- .../x/sys/unix/zerrors_linux_mips.go | 114 +- .../x/sys/unix/zerrors_linux_mips64.go | 114 +- .../x/sys/unix/zerrors_linux_mips64le.go | 114 +- .../x/sys/unix/zerrors_linux_mipsle.go | 114 +- .../x/sys/unix/zerrors_linux_ppc64.go | 115 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 115 +- .../x/sys/unix/zerrors_linux_s390x.go | 115 +- .../x/sys/unix/zerrors_netbsd_arm.go | 3 - .../x/sys/unix/zerrors_openbsd_arm.go | 1586 --------- .../x/sys/unix/zerrors_solaris_amd64.go | 6 - .../x/sys/unix/zsyscall_darwin_386.go | 329 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 344 +- .../x/sys/unix/zsyscall_darwin_arm.go | 333 +- .../x/sys/unix/zsyscall_darwin_arm64.go | 329 +- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 183 +- .../x/sys/unix/zsyscall_freebsd_386.go | 378 +-- .../x/sys/unix/zsyscall_freebsd_amd64.go | 378 +-- .../x/sys/unix/zsyscall_freebsd_arm.go | 378 +-- .../x/sys/unix/zsyscall_linux_386.go | 180 +- .../x/sys/unix/zsyscall_linux_amd64.go | 180 +- 
.../x/sys/unix/zsyscall_linux_arm.go | 180 +- .../x/sys/unix/zsyscall_linux_arm64.go | 180 +- .../x/sys/unix/zsyscall_linux_mips.go | 180 +- .../x/sys/unix/zsyscall_linux_mips64.go | 180 +- .../x/sys/unix/zsyscall_linux_mips64le.go | 180 +- .../x/sys/unix/zsyscall_linux_mipsle.go | 180 +- .../x/sys/unix/zsyscall_linux_ppc64.go | 180 +- .../x/sys/unix/zsyscall_linux_ppc64le.go | 180 +- .../x/sys/unix/zsyscall_linux_s390x.go | 180 +- .../x/sys/unix/zsyscall_netbsd_386.go | 185 +- .../x/sys/unix/zsyscall_netbsd_amd64.go | 185 +- .../x/sys/unix/zsyscall_netbsd_arm.go | 187 +- .../x/sys/unix/zsyscall_openbsd_386.go | 185 +- .../x/sys/unix/zsyscall_openbsd_amd64.go | 185 +- .../x/sys/unix/zsyscall_openbsd_arm.go | 1404 -------- .../x/sys/unix/zsyscall_solaris_amd64.go | 69 +- .../x/sys/unix/zsysnum_darwin_arm.go | 110 +- .../x/sys/unix/zsysnum_darwin_arm64.go | 50 +- .../x/sys/unix/zsysnum_freebsd_386.go | 686 ++-- .../x/sys/unix/zsysnum_freebsd_amd64.go | 686 ++-- .../x/sys/unix/zsysnum_freebsd_arm.go | 686 ++-- .../x/sys/unix/zsysnum_linux_386.go | 2 - .../x/sys/unix/zsysnum_linux_amd64.go | 1 - .../x/sys/unix/zsysnum_linux_arm.go | 1 - .../x/sys/unix/zsysnum_linux_arm64.go | 1 - .../x/sys/unix/zsysnum_linux_mips.go | 1 - .../x/sys/unix/zsysnum_linux_mips64.go | 1 - .../x/sys/unix/zsysnum_linux_mips64le.go | 1 - .../x/sys/unix/zsysnum_linux_mipsle.go | 1 - .../x/sys/unix/zsysnum_linux_ppc64.go | 1 - .../x/sys/unix/zsysnum_linux_ppc64le.go | 1 - .../x/sys/unix/zsysnum_linux_s390x.go | 2 - .../x/sys/unix/zsysnum_netbsd_386.go | 1 - .../x/sys/unix/zsysnum_netbsd_amd64.go | 1 - .../x/sys/unix/zsysnum_netbsd_arm.go | 1 - .../x/sys/unix/zsysnum_openbsd_arm.go | 213 -- .../x/sys/unix/ztypes_darwin_386.go | 19 +- .../x/sys/unix/ztypes_darwin_amd64.go | 14 +- .../x/sys/unix/ztypes_darwin_arm.go | 14 - .../x/sys/unix/ztypes_darwin_arm64.go | 14 - .../x/sys/unix/ztypes_dragonfly_amd64.go | 5 - .../x/sys/unix/ztypes_freebsd_386.go | 79 +- .../x/sys/unix/ztypes_freebsd_amd64.go | 75 +- .../x/sys/unix/ztypes_freebsd_arm.go | 87 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 123 +- .../x/sys/unix/ztypes_linux_amd64.go | 123 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 123 +- .../x/sys/unix/ztypes_linux_arm64.go | 123 +- .../x/sys/unix/ztypes_linux_mips.go | 123 +- .../x/sys/unix/ztypes_linux_mips64.go | 123 +- .../x/sys/unix/ztypes_linux_mips64le.go | 123 +- .../x/sys/unix/ztypes_linux_mipsle.go | 123 +- .../x/sys/unix/ztypes_linux_ppc64.go | 123 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 123 +- .../x/sys/unix/ztypes_linux_s390x.go | 123 +- .../x/sys/unix/ztypes_netbsd_386.go | 5 - .../x/sys/unix/ztypes_netbsd_amd64.go | 5 - .../x/sys/unix/ztypes_netbsd_arm.go | 5 - .../x/sys/unix/ztypes_openbsd_386.go | 5 - .../x/sys/unix/ztypes_openbsd_amd64.go | 5 - .../x/sys/unix/ztypes_openbsd_arm.go | 439 --- .../x/sys/unix/ztypes_solaris_amd64.go | 2 + 279 files changed, 12178 insertions(+), 27977 deletions(-) delete mode 100644 vendor/cloud.google.com/go/MIGRATION.md create mode 100644 vendor/cloud.google.com/go/internal/cloud.go delete mode 100644 vendor/cloud.google.com/go/old-news.md delete mode 100755 vendor/cloud.google.com/go/run-tests.sh create mode 100644 vendor/github.com/Sirupsen/logrus/CHANGELOG.md create mode 100644 vendor/github.com/Sirupsen/logrus/json_formatter_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_openbsd.go create mode 100644 vendor/github.com/Sirupsen/logrus/writer.go delete mode 100644 vendor/github.com/go-ini/ini/.travis.yml delete mode 100644 
vendor/github.com/go-ini/ini/Makefile delete mode 100644 vendor/github.com/go-ini/ini/error.go delete mode 100644 vendor/github.com/go-ini/ini/key.go delete mode 100644 vendor/github.com/go-ini/ini/key_test.go delete mode 100644 vendor/github.com/go-ini/ini/parser.go delete mode 100644 vendor/github.com/go-ini/ini/parser_test.go delete mode 100644 vendor/github.com/go-ini/ini/section.go delete mode 100644 vendor/github.com/go-ini/ini/section_test.go create mode 100644 vendor/github.com/prometheus/client_golang/AUTHORS.md delete mode 100644 vendor/github.com/prometheus/client_golang/MAINTAINERS.md delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/desc_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/example_memstats_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/example_selfcollector_test.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/example_timer_complex_test.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/example_timer_gauge_test.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/example_timer_test.go rename vendor/github.com/prometheus/client_golang/prometheus/{expvar_collector.go => expvar.go} (81%) rename vendor/github.com/prometheus/client_golang/prometheus/{expvar_collector_test.go => expvar_test.go} (98%) delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/fnv.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/labels.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/observer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/push.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer_test.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value_test.go delete mode 100644 vendor/github.com/spf13/viper/overrides_test.go delete mode 100644 vendor/github.com/spf13/viper/util_test.go delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/cap_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_darwin_test.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_dragonfly.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_dragonfly_test.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_linux.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_linux_test.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_netbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_netbsd_test.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_openbsd_test.go delete mode 100644 vendor/golang.org/x/sys/unix/dev_solaris_test.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/file_unix.go create mode 100644 vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go delete mode 100644 vendor/golang.org/x/sys/unix/pagesize_unix.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go delete mode 100644 
vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go diff --git a/glide.lock b/glide.lock index 5c27f4f37e0..802efcea3b4 100644 --- a/glide.lock +++ b/glide.lock @@ -1,12 +1,13 @@ -hash: c93b7bb90a59a66ebfa93614b4d3bc73a46d973db4fcc5212ab1b9da8261132c -updated: 2017-09-20T20:27:26.578282752-04:00 +hash: 1626357572d7723d44f5c3d8ba27e94a6c95a306ff8906f9d79a2e2711d5af9f +updated: 2017-09-26T20:49:30.595112131+02:00 imports: - name: cloud.google.com/go - version: 6914032ea644de194c5f05cc04124f111396531c + version: 3b1ae45394a234c385be014e9a488f2bb6eef821 subpackages: - compute/metadata + - internal - name: github.com/aws/aws-sdk-go - version: d76e48be81c125ffb3d030020b66d64731d2b9d0 + version: b69f447375c7fa0047ebcdd8ae5d585d5aac2f71 subpackages: - aws - aws/awserr @@ -45,7 +46,7 @@ imports: subpackages: - quantile - name: github.com/davecgh/go-spew - version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d + version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 subpackages: - spew - name: github.com/dgrijalva/jwt-go @@ -100,7 +101,7 @@ imports: - name: github.com/ghodss/yaml version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee - name: github.com/go-ini/ini - version: c787282c39ac1fc618827141a1f762240def08a3 + version: 2e44421e256d82ebbf3d4d4fcabe8930b905eff3 - name: github.com/go-openapi/analysis version: b44dc874b601d9e4e2f6e19140e794ba24bead3b - name: github.com/go-openapi/jsonpointer @@ -228,7 +229,7 @@ imports: subpackages: - jsonrpc2 - name: github.com/prometheus/client_golang - version: 50b3332fd63be43e38472600ec187ceec39d26d6 + version: e51041b3fa41cece0dca035740ba6411905be473 subpackages: - prometheus - name: github.com/prometheus/client_model @@ -250,7 +251,7 @@ imports: - name: github.com/PuerkitoBio/urlesc version: 5bd2802263f21d8788851d5305584c82a5c75d7e - name: github.com/Sirupsen/logrus - version: 51fe59aca108dc5680109e7b2051cbdcfa5a253c + version: 26709e2714106fb8ad40b773b711ebce25b78914 - name: github.com/spf13/afero version: b28a7effac979219c2a2ed6205a4d70e4b1bcd02 subpackages: @@ -265,7 +266,7 @@ imports: - name: github.com/spf13/pflag version: 5ccb023bc27df288a957c5e994cd44fd19619465 - name: github.com/spf13/viper - version: 25b30aa063fc18e48662b86996252eabdcf2f0c7 + version: 7fb2782df3d83e0036cc89f461ed0422628776f4 - name: github.com/ugorji/go version: ded73eae5db7e7a0ef6f55aace87a2873c5d2b74 subpackages: @@ -295,7 +296,7 @@ imports: - jws - jwt - name: golang.org/x/sys - version: b6e1ae21643682ce023deb8d152024597b0e9bb4 + version: a2e06a18b0d52d8cb2010e04b372a1965d8e3439 subpackages: - unix - name: golang.org/x/text diff --git a/glide.yaml b/glide.yaml index 356c66a3455..49d34fb738b 100644 --- a/glide.yaml +++ b/glide.yaml @@ -43,3 +43,11 @@ import: - jsonrpc2 - package: github.com/gophercloud/gophercloud version: b4c2377fa77951a0e08163f52dc9b3e206355194 +- package: github.com/davecgh/go-spew + version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 + supbackages: + - spew +- package: golang.org/x/sys + version: a2e06a18 + subpackages: + - unix diff --git a/vendor/cloud.google.com/go/.travis.yml b/vendor/cloud.google.com/go/.travis.yml index 8c769d71f09..76847fcc3f7 100644 --- a/vendor/cloud.google.com/go/.travis.yml +++ b/vendor/cloud.google.com/go/.travis.yml @@ -3,14 +3,9 @@ language: go go: - 1.6 - 1.7 -- 1.8 install: - go get -v cloud.google.com/go/... 
script: -- openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in key.json.enc -out key.json -d +- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d - GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json" - ./run-tests.sh $TRAVIS_COMMIT -env: - matrix: - # The GCLOUD_TESTS_API_KEY environment variable. - secure: VdldogUOoubQ60LhuHJ+g/aJoBiujkSkWEWl79Zb8cvQorcQbxISS+JsOOp4QkUOU4WwaHAm8/3pIH1QMWOR6O78DaLmDKi5Q4RpkVdCpUXy+OAfQaZIcBsispMrjxLXnqFjo9ELnrArfjoeCTzaX0QTCfwQwVmigC8rR30JBKI= + go test -race -v cloud.google.com/go/... diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md index aed05dc4fc2..59fb44df731 100644 --- a/vendor/cloud.google.com/go/CONTRIBUTING.md +++ b/vendor/cloud.google.com/go/CONTRIBUTING.md @@ -2,14 +2,6 @@ 1. Sign one of the contributor license agreements below. 1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool. - 1. You will need to ensure that your `GOBIN` directory (by default - `$GOPATH/bin`) is in your `PATH` so that git can find the command. - 1. If you would like, you may want to set up aliases for git-codereview, - such that `git codereview change` becomes `git change`. See the - [godoc](https://godoc.org/golang.org/x/review/git-codereview) for details. - 1. Should you run into issues with the git-codereview tool, please note - that all error messages will assume that you have set up these - aliases. 1. Get the cloud package by running `go get -d cloud.google.com/go`. 1. If you have already checked out the source, make sure that the remote git origin is https://code.googlesource.com/gocloud: @@ -20,8 +12,7 @@ the directions. 1. Make changes and create a change by running `git codereview change `, provide a commit message, and use `git codereview mail` to create a Gerrit CL. -1. Keep amending to the change with `git codereview change` and mail as your receive -feedback. Each new mailed amendment will create a new patch set for your change in Gerrit. +1. Keep amending to the change and mail as your receive feedback. ## Integration Tests @@ -40,33 +31,30 @@ run the against the actual APIs. - **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455) - **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file. -- **GCLOUD_TESTS_API_KEY**: Your API key. Install the [gcloud command-line tool][gcloudcli] to your machine and use it -to create some resources used in integration tests. +to create the indexes used in the datastore integration tests with indexes +found in `datastore/testdata/index.yaml`: From the project's root directory: ``` sh -# Set the default project in your env. +# Set the default project in your env $ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID -# Authenticate the gcloud tool with your account. +# Authenticate the gcloud tool with your account $ gcloud auth login -# Create the indexes used in the datastore integration tests. +# Create the indexes $ gcloud preview datastore create-indexes datastore/testdata/index.yaml +``` -# Create a Google Cloud storage bucket with the same name as your test project, -# and with the Stackdriver Logging service account as owner, for the sink -# integration tests in logging. 
+The Sink integration tests in preview/logging require a Google Cloud storage +bucket with the same name as your test project, and with the Stackdriver Logging +service account as owner: +``` sh $ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID $ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID - -# Create a Spanner instance for the spanner integration tests. -$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test' -# NOTE: Spanner instances are priced by the node-hour, so you may want to delete -# the instance after testing with 'gcloud beta spanner instances delete'. ``` Once you've set the environment variables, you can run the integration tests by diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS index d4b376c7cc3..07509ccb7c9 100644 --- a/vendor/cloud.google.com/go/CONTRIBUTORS +++ b/vendor/cloud.google.com/go/CONTRIBUTORS @@ -11,7 +11,6 @@ # Keep the list alphabetically sorted. -Alexis Hunt Andreas Litt Andrew Gerrand Brad Fitzpatrick @@ -25,13 +24,11 @@ Ingo Oeser Johan Euphrosine Jonathan Amsterdam Luna Duclos -Magnus Hiie Michael McGreevy Omar Jarjur Paweł Knap Péter Szilágyi Sarah Adams -Thanatat Tamtan Toby Burress Tuo Shan Tyler Treat diff --git a/vendor/cloud.google.com/go/MIGRATION.md b/vendor/cloud.google.com/go/MIGRATION.md deleted file mode 100644 index 791210de197..00000000000 --- a/vendor/cloud.google.com/go/MIGRATION.md +++ /dev/null @@ -1,54 +0,0 @@ -# Code Changes - -## v0.10.0 - -- pubsub: Replace - - ``` - sub.ModifyPushConfig(ctx, pubsub.PushConfig{Endpoint: "https://example.com/push"}) - ``` - - with - - ``` - sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{ - PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"}, - }) - ``` - -- trace: traceGRPCServerInterceptor will be provided from *trace.Client. -Given an initialized `*trace.Client` named `tc`, instead of - - ``` - s := grpc.NewServer(grpc.UnaryInterceptor(trace.GRPCServerInterceptor(tc))) - ``` - - write - - ``` - s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor())) - ``` - -- trace trace.GRPCClientInterceptor will also provided from *trace.Client. -Instead of - - ``` - conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(trace.GRPCClientInterceptor())) - ``` - - write - - ``` - conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())) - ``` - -- trace: We removed the deprecated `trace.EnableGRPCTracing`. Use the gRPC -interceptor as a dial option as shown below when initializing Cloud package -clients: - - ``` - c, err := pubsub.NewClient(ctx, "project-id", option.WithGRPCDialOption(grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor()))) - if err != nil { - ... - } - ``` diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md index 91b1be2d178..5e74c46a9dd 100644 --- a/vendor/cloud.google.com/go/README.md +++ b/vendor/cloud.google.com/go/README.md @@ -1,142 +1,47 @@ -# Google Cloud Client Libraries for Go +# Google Cloud for Go +[![Build Status](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go) [![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://godoc.org/cloud.google.com/go) -Go packages for [Google Cloud Platform](https://cloud.google.com) services. 
- ``` go import "cloud.google.com/go" ``` -To install the packages on your system, - -``` -$ go get -u cloud.google.com/go/... -``` +Go packages for Google Cloud Platform services. -**NOTE:** Some of these packages are under development, and may occasionally -make backwards-incompatible changes. +**NOTE:** These packages are under development, and may occasionally make +backwards-incompatible changes. **NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). - * [News](#news) - * [Supported APIs](#supported-apis) - * [Go Versions Supported](#go-versions-supported) - * [Authorization](#authorization) - * [Cloud Datastore](#cloud-datastore-) - * [Cloud Storage](#cloud-storage-) - * [Cloud Pub/Sub](#cloud-pub-sub-) - * [Cloud BigQuery](#cloud-bigquery-) - * [Stackdriver Logging](#stackdriver-logging-) - * [Cloud Spanner](#cloud-spanner-) - - ## News -_September 8, 2017_ - -*v0.13.0* - -- bigquery: UseLegacySQL options for CreateTable and QueryConfig. Use these - options to continue using Legacy SQL after the client switches its default - to Standard SQL. - -- bigquery: Support for updating dataset labels. - -- bigquery: Set DatasetIterator.ProjectID to list datasets in a project other - than the client's. DatasetsInProject is no longer needed and is deprecated. - -- bigtable: Fail ListInstances when any zones fail. - -- spanner: support decoding of slices of basic types (e.g. []string, []int64, - etc.) - -- logging/logadmin: UpdateSink no longer creates a sink if it is missing - (actually a change to the underlying service, not the client) - -- profiler: Service and ServiceVersion replace Target in Config. - -_August 22, 2017_ - -*v0.12.0* - -- pubsub: Subscription.Receive now uses streaming pull. - -- pubsub: add Client.TopicInProject to access topics in a different project - than the client. - -- errors: renamed errorreporting. The errors package will be removed shortly. - -- datastore: improved retry behavior. - -- bigquery: support updates to dataset metadata, with etags. - -- bigquery: add etag support to Table.Update (BREAKING: etag argument added). - -- bigquery: generate all job IDs on the client. - -- storage: support bucket lifecycle configurations. - - -_July 31, 2017_ - -*v0.11.0* - -- Clients for spanner, pubsub and video are now in beta. +_September 8, 2016_ -- New client for DLP. +* New clients for some of Google's Machine Learning APIs: Vision, Speech, and +Natural Language. -- spanner: performance and testing improvements. - -- storage: requester-pays buckets are supported. - -- storage, profiler, bigtable, bigquery: bug fixes and other minor improvements. - -- pubsub: bug fixes and other minor improvements - -_June 17, 2017_ - - -*v0.10.0* - -- pubsub: Subscription.ModifyPushConfig replaced with Subscription.Update. - -- pubsub: Subscription.Receive now runs concurrently for higher throughput. - -- vision: cloud.google.com/go/vision is deprecated. Use -cloud.google.com/go/vision/apiv1 instead. - -- translation: now stable. - -- trace: several changes to the surface. See the link below. - -[Code changes required from v0.9.0.](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/MIGRATION.md) +* Preview version of a new [Stackdriver Logging][cloud-logging] client in +[`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging). +This client uses gRPC as its transport layer, and supports log reading, sinks +and metrics. 
It will replace the current client at `cloud.google.com/go/logging` shortly. +## Supported APIs -[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md) +Google API | Status | Package +-------------------------------|--------------|----------------------------------------------------------- +[Datastore][cloud-datastore] | beta | [`cloud.google.com/go/datastore`][cloud-datastore-ref] +[Storage][cloud-storage] | beta | [`cloud.google.com/go/storage`][cloud-storage-ref] +[Pub/Sub][cloud-pubsub] | experimental | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref] +[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref] +[BigQuery][cloud-bigquery] | experimental | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref] +[Logging][cloud-logging] | experimental | [`cloud.google.com/go/logging`][cloud-logging-ref] +[Vision][cloud-vision] | experimental | [`cloud.google.com/go/vision`][cloud-vision-ref] +[Language][cloud-language] | experimental | [`cloud.google.com/go/language/apiv1beta1`][cloud-language-ref] +[Speech][cloud-speech] | experimental | [`cloud.google.com/go/speech/apiv1beta`][cloud-speech-ref] -## Supported APIs -Google API | Status | Package ----------------------------------|--------------|----------------------------------------------------------- -[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref] -[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref] -[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref] -[BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref] -[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref] -[Monitoring][cloud-monitoring] | beta | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref] -[Pub/Sub][cloud-pubsub] | beta | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref] -[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref] -[Language][cloud-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-language-ref] -[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref] -[Spanner][cloud-spanner] | beta | [`cloud.google.com/go/spanner`][cloud-spanner-ref] -[Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref] -[Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace`][cloud-trace-ref] -[Video Intelligence][cloud-video]| beta | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref] -[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref] - - -> **Alpha status**: the API is still being actively developed. As a +> **Experimental status**: the API is still being actively developed. As a > result, it might change in backward-incompatible ways and is not recommended > for production use. > @@ -167,56 +72,35 @@ By default, each API will use [Google Application Default Credentials][default-c for authorization credentials used in calling the API endpoints. This will allow your application to run in many environments without requiring explicit configuration. 
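The Application Default Credentials paragraph above keeps no inline snippet in this revision of the README. A minimal sketch of that default path, assuming the vendored `cloud.google.com/go/storage` package and `golang.org/x/net/context` (any of the other clients would look the same), is:

```go
package main

import (
	"log"

	"cloud.google.com/go/storage"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	// No explicit credential option: the client falls back to
	// Google Application Default Credentials for this environment.
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	// Use the client, e.g. client.Bucket("bucket").Object("object1")...
}
```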
-[snip]:# (auth) -```go -client, err := storage.NewClient(ctx) -``` - -To authorize using a -[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys), -pass -[`option.WithServiceAccountFile`](https://godoc.org/google.golang.org/api/option#WithServiceAccountFile) -to the `NewClient` function of the desired package. For example: - -[snip]:# (auth-JSON) -```go -client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json")) -``` - -You can exert more control over authorization by using the +Manually-configured authorization can be achieved using the [`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to -create an `oauth2.TokenSource`. Then pass +create an `oauth2.TokenSource`. This token source can be passed to the `NewClient` +function for the relevant API using a [`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource) -to the `NewClient` function: -[snip]:# (auth-ts) -```go -tokenSource := ... -client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) -``` +option. -## Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore) +## Google Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore) -- [About Cloud Datastore][cloud-datastore] -- [Activating the API for your project][cloud-datastore-activation] -- [API documentation][cloud-datastore-docs] -- [Go client documentation](https://godoc.org/cloud.google.com/go/datastore) -- [Complete sample program](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/datastore/tasks) +[Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a fully- +managed, schemaless database for storing non-relational data. Cloud Datastore +automatically scales with your users and supports ACID transactions, high availability +of reads and writes, strong consistency for reads and ancestor queries, and eventual +consistency for all other queries. -### Example Usage +Follow the [activation instructions][cloud-datastore-activation] to use the Google +Cloud Datastore API with your project. 
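The `option.WithTokenSource` route described in the authorization section above also loses its snippet in this revision; a sketch of it, with a hypothetical static token standing in for a real `oauth2.TokenSource`, could look like this (the datastore walkthrough continues below):

```go
package main

import (
	"log"

	"cloud.google.com/go/storage"
	"golang.org/x/net/context"
	"golang.org/x/oauth2"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	// Hypothetical token source for illustration; a real application would
	// derive one from its own OAuth2 flow rather than a hard-coded token.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "access-token"})
	client, err := storage.NewClient(ctx, option.WithTokenSource(ts))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```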
First create a `datastore.Client` to use throughout your application: -[snip]:# (datastore-1) ```go client, err := datastore.NewClient(ctx, "my-project-id") if err != nil { - log.Fatal(err) + log.Fatalln(err) } ``` Then use that client to interact with the API: -[snip]:# (datastore-2) ```go type Post struct { Title string @@ -224,8 +108,8 @@ type Post struct { PublishedAt time.Time } keys := []*datastore.Key{ - datastore.NameKey("Post", "post1", nil), - datastore.NameKey("Post", "post2", nil), + datastore.NewKey(ctx, "Post", "post1", 0, nil), + datastore.NewKey(ctx, "Post", "post2", 0, nil), } posts := []*Post{ {Title: "Post 1", Body: "...", PublishedAt: time.Now()}, @@ -236,18 +120,16 @@ if _, err := client.PutMulti(ctx, keys, posts); err != nil { } ``` -## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage) +## Google Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage) -- [About Cloud Storage][cloud-storage] -- [API documentation][cloud-storage-docs] -- [Go client documentation](https://godoc.org/cloud.google.com/go/storage) -- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage) +[Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store +data on Google infrastructure with very high reliability, performance and availability, +and can be used to distribute large data objects to users via direct download. -### Example Usage +https://godoc.org/cloud.google.com/go/storage First create a `storage.Client` to use throughout your application: -[snip]:# (storage-1) ```go client, err := storage.NewClient(ctx) if err != nil { @@ -255,7 +137,6 @@ if err != nil { } ``` -[snip]:# (storage-2) ```go // Read the object1 from bucket. rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) @@ -269,18 +150,15 @@ if err != nil { } ``` -## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub) +## Google Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub) -- [About Cloud Pubsub][cloud-pubsub] -- [API documentation][cloud-pubsub-docs] -- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub) -- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub) - -### Example Usage +[Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect +your services with reliable, many-to-many, asynchronous messaging hosted on Google's +infrastructure. Cloud Pub/Sub automatically scales as you need it and provides a foundation +for building your own robust, global services. First create a `pubsub.Client` to use throughout your application: -[snip]:# (pubsub-1) ```go client, err := pubsub.NewClient(ctx, "project-id") if err != nil { @@ -288,155 +166,38 @@ if err != nil { } ``` -Then use the client to publish and subscribe: - -[snip]:# (pubsub-2) ```go // Publish "hello world" on topic1. topic := client.Topic("topic1") -res := topic.Publish(ctx, &pubsub.Message{ +msgIDs, err := topic.Publish(ctx, &pubsub.Message{ Data: []byte("hello world"), }) -// The publish happens asynchronously. -// Later, you can get the result from res: -... -msgID, err := res.Get(ctx) if err != nil { log.Fatal(err) } -// Use a callback to receive messages via subscription1. 
-sub := client.Subscription("subscription1") -err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { - fmt.Println(m.Data) - m.Ack() // Acknowledge that we've consumed the message. -}) +// Create an iterator to pull messages via subscription1. +it, err := client.Subscription("subscription1").Pull(ctx) if err != nil { log.Println(err) } -``` - -## Cloud BigQuery [![GoDoc](https://godoc.org/cloud.google.com/go/bigquery?status.svg)](https://godoc.org/cloud.google.com/go/bigquery) - -- [About Cloud BigQuery][cloud-bigquery] -- [API documentation][cloud-bigquery-docs] -- [Go client documentation][cloud-bigquery-ref] -- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/bigquery) - -### Example Usage - -First create a `bigquery.Client` to use throughout your application: -[snip]:# (bq-1) -```go -c, err := bigquery.NewClient(ctx, "my-project-ID") -if err != nil { - // TODO: Handle error. -} -``` +defer it.Stop() -Then use that client to interact with the API: -[snip]:# (bq-2) -```go -// Construct a query. -q := c.Query(` - SELECT year, SUM(number) - FROM [bigquery-public-data:usa_names.usa_1910_2013] - WHERE name = "William" - GROUP BY year - ORDER BY year -`) -// Execute the query. -it, err := q.Read(ctx) -if err != nil { - // TODO: Handle error. -} -// Iterate through the results. -for { - var values []bigquery.Value - err := it.Next(&values) - if err == iterator.Done { +// Consume N messages from the iterator. +for i := 0; i < N; i++ { + msg, err := it.Next() + if err == pubsub.Done { break } if err != nil { - // TODO: Handle error. + log.Fatalf("Failed to retrieve message: %v", err) } - fmt.Println(values) -} -``` - - -## Stackdriver Logging [![GoDoc](https://godoc.org/cloud.google.com/go/logging?status.svg)](https://godoc.org/cloud.google.com/go/logging) - -- [About Stackdriver Logging][cloud-logging] -- [API documentation][cloud-logging-docs] -- [Go client documentation][cloud-logging-ref] -- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging) -### Example Usage - -First create a `logging.Client` to use throughout your application: -[snip]:# (logging-1) -```go -ctx := context.Background() -client, err := logging.NewClient(ctx, "my-project") -if err != nil { - // TODO: Handle error. + fmt.Printf("Message %d: %s\n", i, msg.Data) + msg.Done(true) // Acknowledge that we've consumed the message. } ``` -Usually, you'll want to add log entries to a buffer to be periodically flushed -(automatically and asynchronously) to the Stackdriver Logging service. -[snip]:# (logging-2) -```go -logger := client.Logger("my-log") -logger.Log(logging.Entry{Payload: "something happened!"}) -``` - -Close your client before your program exits, to flush any buffered log entries. -[snip]:# (logging-3) -```go -err = client.Close() -if err != nil { - // TODO: Handle error. 
-} -``` - -## Cloud Spanner [![GoDoc](https://godoc.org/cloud.google.com/go/spanner?status.svg)](https://godoc.org/cloud.google.com/go/spanner) - -- [About Cloud Spanner][cloud-spanner] -- [API documentation][cloud-spanner-docs] -- [Go client documentation](https://godoc.org/cloud.google.com/go/spanner) - -### Example Usage - -First create a `spanner.Client` to use throughout your application: - -[snip]:# (spanner-1) -```go -client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D") -if err != nil { - log.Fatal(err) -} -``` - -[snip]:# (spanner-2) -```go -// Simple Reads And Writes -_, err = client.Apply(ctx, []*spanner.Mutation{ - spanner.Insert("Users", - []string{"name", "email"}, - []interface{}{"alice", "a@example.com"})}) -if err != nil { - log.Fatal(err) -} -row, err := client.Single().ReadRow(ctx, "Users", - spanner.Key{"alice"}, []string{"email"}) -if err != nil { - log.Fatal(err) -} -``` - - ## Contributing Contributions are welcome. Please, see the @@ -460,46 +221,25 @@ for more information. [cloud-storage]: https://cloud.google.com/storage/ [cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage -[cloud-storage-docs]: https://cloud.google.com/storage/docs +[cloud-storage-docs]: https://cloud.google.com/storage/docs/overview [cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets [cloud-bigtable]: https://cloud.google.com/bigtable/ [cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable [cloud-bigquery]: https://cloud.google.com/bigquery/ -[cloud-bigquery-docs]: https://cloud.google.com/bigquery/docs [cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery [cloud-logging]: https://cloud.google.com/logging/ -[cloud-logging-docs]: https://cloud.google.com/logging/docs [cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging -[cloud-monitoring]: https://cloud.google.com/monitoring/ -[cloud-monitoring-ref]: https://godoc.org/cloud.google.com/go/monitoring/apiv3 - -[cloud-vision]: https://cloud.google.com/vision -[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision/apiv1 +[cloud-vision]: https://cloud.google.com/vision/ +[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision [cloud-language]: https://cloud.google.com/natural-language -[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1 +[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1beta1 [cloud-speech]: https://cloud.google.com/speech -[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1 - -[cloud-spanner]: https://cloud.google.com/spanner/ -[cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner -[cloud-spanner-docs]: https://cloud.google.com/spanner/docs - -[cloud-translation]: https://cloud.google.com/translation -[cloud-translation-ref]: https://godoc.org/cloud.google.com/go/translation - -[cloud-trace]: https://cloud.google.com/trace/ -[cloud-trace-ref]: https://godoc.org/cloud.google.com/go/trace - -[cloud-video]: https://cloud.google.com/video-intelligence/ -[cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1 - -[cloud-errors]: https://cloud.google.com/error-reporting/ -[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errorreporting +[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1beta1 [default-creds]: https://developers.google.com/identity/protocols/application-default-credentials diff --git a/vendor/cloud.google.com/go/appveyor.yml 
b/vendor/cloud.google.com/go/appveyor.yml index e66cd00af4f..f9ba7747613 100644 --- a/vendor/cloud.google.com/go/appveyor.yml +++ b/vendor/cloud.google.com/go/appveyor.yml @@ -10,10 +10,6 @@ clone_folder: c:\gopath\src\cloud.google.com\go environment: GOPATH: c:\gopath - GCLOUD_TESTS_GOLANG_PROJECT_ID: dulcet-port-762 - GCLOUD_TESTS_GOLANG_KEY: c:\gopath\src\cloud.google.com\go\key.json - KEYFILE_CONTENTS: - secure: IvRbDAhM2PIQqzVkjzJ4FjizUvoQ+c3vG/qhJQG+HlZ/L5KEkqLu+x6WjLrExrNMyGku4znB2jmbTrUW3Ob4sGG+R5vvqeQ3YMHCVIkw5CxY+/bUDkW5RZWsVbuCnNa/vKsWmCP+/sZW6ICe29yKJ2ZOb6QaauI4s9R6j+cqBbU9pumMGYFRb0Rw3uUU7DKmVFCy+NjTENZIlDP9rmjANgAzigowJJEb2Tg9sLlQKmQeKiBSRN8lKc5Nq60a+fIzHGKvql4eIitDDDpOpyHv15/Xr1BzFw2yDoiR4X1lng0u7q0X9RgX4VIYa6gT16NXBEmQgbuX8gh7SfPMp9RhiZD9sVUaV+yogEabYpyPnmUURo0hXwkctKaBkQlEmKvjHwF5dvbg8+yqGhwtjAgFNimXG3INrwQsfQsZskkQWanutbJf9xy50GyWWFZZdi0uT4oXP/b5P7aklPXKXsvrJKBh7RjEaqBrhi86IJwOjBspvoR4l2WmcQyxb2xzQS1pjbBJFQfYJJ8+JgsstTL8PBO9d4ybJC0li1Om1qnWxkaewvPxxuoHJ9LpRKof19yRYWBmhTXb2tTASKG/zslvl4fgG4DmQBS93WC7dsiGOhAraGw2eCTgd0lYZOhk1FjWl9TS80aktXxzH/7nTvem5ohm+eDl6O0wnTL4KXjQVNSQ1PyLn4lGRJ5MNGzBTRFWIr2API2rca4Fysyfh/UdmazPGlNbY9JPGqb9+F04QzLfqm+Zz/cHy59E7lOSMBlUI4KD6d6ZNNKNRH+/g9i+fSiyiXKugTfda8KBnWGyPwprxuWGYaiQUGUYOwJY5R6x5c4mjImAB310V+Wo33UbWFJiwxEDsiCNqW1meVkBzt2er26vh4qbgCUIQ3iM3gFPfHgy+QxkmIhic7Q1HYacQElt8AAP41M7cCKWCuZidegP37MBB//mjjiNt047ZSQEvB4tqsX/OvfbByVef+cbtVw9T0yjHvmCdPW1XrhyrCCgclu6oYYdbmc5D7BBDRbjjMWGv6YvceAbfGf6ukdB5PuV+TGEN/FoQ1QTRA6Aqf+3fLMg4mS4oyTfw5xyYNbv3qoyLPrp+BnxI53WB9p0hfMg4n9FD6NntBxjDq+Q3Lk/bjC/Y4MaRWdzbMzF9a0lgGfcw9DURlK5p7uGJC9vg34feNoQprxVEZRQ01cHLeob6eGkYm4HxSRx8JY39Mh+9wzJo+k/aIvFleNC3e35NOrkXr6wb5e42n2DwBdPqdNolTLtLFRglAL1LTpp27UjvjieWJAKfoDTR5CKl01sZqt0wPdLLcvsMj6CiPFmccUIOYeZMe86kLBD61Qa5F1EwkgO3Om2qSjW96FzL4skRc+BmU5RrHlAFSldR1wpUgtkUMv9vH5Cy+UJdcvpZ8KbmhZ2PsjF7ddJ1ve9RAw3cP325AyIMwZ77Ef1mgTM0NJze6eSW1qKlEsgt1FADPyeUu1NQTA2H2dueMPGlArWTSUgyWR9AdfpqouT7eg0JWI5w+yUZZC+/rPglYbt84oLmYpwuli0z8FyEQRPIc3EtkfWIv/yYgDr2TZ0N2KvGfpi/MAUWgxI1gleC2uKgEOEtuJthd3XZjF2NoE7IBqjQOINybcJOjyeB5vRLDY1FLuxYzdg1y1etkV4XQig/vje install: # Info for debugging. @@ -22,11 +18,9 @@ install: - go env - go get -v -d -t ./... - # Provide a build script, or AppVeyor will call msbuild. build_script: - go install -v ./... - - echo %KEYFILE_CONTENTS% > %GCLOUD_TESTS_GOLANG_KEY% test_script: - - go test -v ./... + - go test -short -v ./... diff --git a/vendor/cloud.google.com/go/authexample_test.go b/vendor/cloud.google.com/go/authexample_test.go index fe75467f94c..528be133b39 100644 --- a/vendor/cloud.google.com/go/authexample_test.go +++ b/vendor/cloud.google.com/go/authexample_test.go @@ -21,29 +21,40 @@ import ( ) func Example_applicationDefaultCredentials() { - // Google Application Default Credentials is the recommended way to authorize - // and authenticate clients. - // - // See the following link on how to create and obtain Application Default Credentials: + ctx := context.Background() + // Use Google Application Default Credentials to authorize and authenticate the client. + // More information about Application Default Credentials and how to enable is at // https://developers.google.com/identity/protocols/application-default-credentials. - client, err := datastore.NewClient(context.Background(), "project-id") + // + // This is the recommended way of authorizing and authenticating. + // + // Note: The example uses the datastore client, but the same steps apply to + // the other client libraries underneath this package. 
+ client, err := datastore.NewClient(ctx, "project-id") if err != nil { // TODO: handle error. } - _ = client // Use the client. + // Use the client. + _ = client } func Example_serviceAccountFile() { + // Warning: The better way to use service accounts is to set GOOGLE_APPLICATION_CREDENTIALS + // and use the Application Default Credentials. + ctx := context.Background() // Use a JSON key file associated with a Google service account to - // authenticate and authorize. Service Account keys can be created and - // downloaded from https://console.developers.google.com/permissions/serviceaccounts. + // authenticate and authorize. + // Go to https://console.developers.google.com/permissions/serviceaccounts to create + // and download a service account key for your project. // - // Note: This example uses the datastore client, but the same steps apply to + // Note: The example uses the datastore client, but the same steps apply to // the other client libraries underneath this package. - client, err := datastore.NewClient(context.Background(), - "project-id", option.WithServiceAccountFile("/path/to/service-account-key.json")) + client, err := datastore.NewClient(ctx, + "project-id", + option.WithServiceAccountFile("/path/to/service-account-key.json")) if err != nil { // TODO: handle error. } - _ = client // Use the client. + // Use the client. + _ = client } diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index e708c031b95..f9d2bef6c2f 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -34,6 +34,8 @@ import ( "golang.org/x/net/context" "golang.org/x/net/context/ctxhttp" + + "cloud.google.com/go/internal" ) const ( @@ -46,8 +48,6 @@ const ( // This is variable name is not defined by any spec, as far as // I know; it was made up for the Go package. metadataHostEnv = "GCE_METADATA_HOST" - - userAgent = "gcloud-golang/0.1" ) type cachedValue struct { @@ -65,20 +65,24 @@ var ( var ( metaClient = &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - ResponseHeaderTimeout: 2 * time.Second, + Transport: &internal.Transport{ + Base: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 2 * time.Second, + }, }, } subscribeClient = &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, + Transport: &internal.Transport{ + Base: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, }, } ) @@ -128,7 +132,6 @@ func getETag(client *http.Client, suffix string) (value, etag string, err error) url := "http://" + host + "/computeMetadata/v1/" + suffix req, _ := http.NewRequest("GET", url, nil) req.Header.Set("Metadata-Flavor", "Google") - req.Header.Set("User-Agent", userAgent) res, err := client.Do(req) if err != nil { return "", "", err @@ -199,9 +202,7 @@ func testOnGCE() bool { // Try two strategies in parallel. 
// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194 go func() { - req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) - req.Header.Set("User-Agent", userAgent) - res, err := ctxhttp.Do(ctx, metaClient, req) + res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP) if err != nil { resc <- false return diff --git a/vendor/cloud.google.com/go/internal/cloud.go b/vendor/cloud.google.com/go/internal/cloud.go new file mode 100644 index 00000000000..8e0c8f8e525 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/cloud.go @@ -0,0 +1,64 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides support for the cloud packages. +// +// Users should not import this package directly. +package internal + +import ( + "fmt" + "net/http" +) + +const userAgent = "gcloud-golang/0.1" + +// Transport is an http.RoundTripper that appends Google Cloud client's +// user-agent to the original request's user-agent header. +type Transport struct { + // TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does. + // Do User-Agent some other way. + + // Base is the actual http.RoundTripper + // requests will use. It must not be nil. + Base http.RoundTripper +} + +// RoundTrip appends a user-agent to the existing user-agent +// header and delegates the request to the base http.RoundTripper. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + ua := req.Header.Get("User-Agent") + if ua == "" { + ua = userAgent + } else { + ua = fmt.Sprintf("%s %s", ua, userAgent) + } + req.Header.Set("User-Agent", ua) + return t.Base.RoundTrip(req) +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. 
+func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} diff --git a/vendor/cloud.google.com/go/key.json.enc b/vendor/cloud.google.com/go/key.json.enc index a8978a9015b90ec780d8a0d55273622e373b94dc..2f673a84b143c71c53fb2b1619a9c96f48ce1f54 100644 GIT binary patch literal 1248 zcmV<61Rwj0Zv%9Mm*y(^-mW4!3RUy2jtHzDzqBac1!ANM5s{p;?JXvx&2>S>?&XkF zbPIGkWM>T~87d^b%*>nJaAymr5U<`@59@t~4JOftxQT!nX^@`S7|o~~O+Q#D(Tea< z0C6)A8SLE!UVU{sAQSPn29g38Y+ zrV zh6d*@2I!vcc;^lf(cQ?Pz?3Ffe`floQ4ON~yPjBX#2YS)8P-#1)~jzFmH>!#k_@6* z?Pa|=)iAO~EZEsyQPlyI@pPP|aa_}tTnzQ9mri9C=?}A^Int5dTDS)n9ga#mJv#L$ zBY9m{rCPaM=sSjZ=a3+0`3^E(t4n8bT_bUS3MpJWBpL&r6h zJ%Xct#s$ZR(jRC3mjD0TPMY@#rsSEiCtk*Ko=v>Mh-?797)&q4y~SHHepsfE9xmSg z4n8vqb#TmGqZt~l>S+M!&90~(5}z8lX}09MudKf5L= zoQ7bN;+9yVdn_4$#-W-8a=%bwPC+qYzuI##;8LEi@-?!OqX9i;E%3&xZZ$lC!UKW0 ztI@XV?CzcLtFjWGYQ%v+Iu!2ou0&QhpI8MI0hRO^CO&sfNGho0#)!##9qVZs+e^Zn zAM>BcIsA(NAq5bM;1+QtF)fOjm;*qDcL^dv{jdk9uv>HE?a0GDBGXJLK#`lp-e;&o zjd}Qxu}rQ6d00k>F=`7~Y;Mfbj8Q+q+Lx(1k8^~qCv}{{z!kzo5=@e_E)7uoUE+8Hx*1ZStR~&B#Pa~ zJSQQ9U3v>5>e$B4bX1m8ykOK;!Ca5JLSVI-@PG=TTEUyz&H?6DLda~RTskwU z2NgP9h<51tLqz{r-|_ZmG$PUnKo>?$aw(m&R_rgQ%+~6fzd4b~_8Q{JATzF`8UL*a zx%$Q*4uJL|74$eDw`n>9I1NL&1U~&?N&_!HUB@zp?uwL$D6`!7m4f{o_+fh{>s7&^ z`UClq@1IQ8yiYQ{rh=X+;)yZS@p`NPA$jn#OuY5sSx#Z>MroeKFfb+uaa7p&G)VRN Kkn>IvykJ$=6K6R9 literal 2432 zcmV-`34it-l&USX|M+{?-NqC3UwZP!9~cXufwcE;XsEQd>f+7?1R~ctV-!a7+I=7;AIX@R11L(kGXg4P*{S}4tQJMhHrGw*6GOwCE4y6eW$MCwt`LeioePo!slde= zh?{$Gq}3vGyQ$6svV5~iS3zKx!LR|2ec5CkLQizAqGEzt-fk_`+s5&e@WHf6rjd{k zhZO$-U_iXQxa1K=VZZ-qw|dg6hYi>SY@ery+&t@>k9`JGb`K^UJyz>+p(cxCxbDos zJ2^shF~*(w_9xPrHE7`&`Vc`{nHO{>FlRq#&`?Xbt51Jx_(uGAk5{;=sU!4|0(Y4Re6ygl)AYVZi-z$$+Nh;_&D!#ei6?@}SrR<-mX{pxlznb#yqF%3k zdzWnItjjb4Qklw=>U!IZAg3}6!-k0Auptx{p0(uuSPQwhk+p6DlH&^wD^<%%-AMTP zJ_fGnefVaGd^|I}f^o-ZaHl@%765s$(qopGyWX-&6!L4Z6WZj5j8|ZQKGM^$|NgHE zMOsn)CWY9sXB3ZoyZtPiE@Pk6cCku-Y7Z&M8?V9Zy!eJ>%GvHVhX-|BcPas=0FWYm z0v{;q{Es$vc1MCTlaXq>HY-XY*6v9$hRb3>GESvV7S?fLBP@risR&+jIc;v$Utcy& z9)Zkg_*F4evBya$R5O8uC0Vlq6?%vsIo!hXa8pa=aXcF2iGugq@|NU7#$z9Y_}?#4 z+Q|7g@046Hy@Vif%9qLPth@k*UN5U2xCCW4A97>f;wCq8_ReQayY^b-&;PFZEUa@? 
zzK+Nl>yl+*dG4J0s)uzQZ^{e-XiO==L(OMzKRvxr7gaP%fw3b_)T5aszA8*|eqZF6 zmE%(=t8R`_M=ELV^D7L^SAeve9u~`c=*~$0#9HK)J>J)*LIh5G)aC)mrLiZMedEELl(V76RL!E1^<6RJ=fZ2c#@L3G{z$0ZY%c0 zb%Lvg)z{fZc7S2jww$GYi3_lBy#s235Gs~bhASijhN*QMVvisFnQL`NlR$EWfP8$9 zMum#U9W{YC=2*WhK6GU_2;JbGGeYYzEeSGcBieLH;+{5o8C7*}hmlBxPW;NS zQ;hR_=67B;gIdb^27UN=wx|TsDQ%e+V(Df5K~+PtJd|y&8@f{q^qnxCn-XM2-{eCC zRKS9WrrM;F*CQT-HAoQUA-konS8x(SAF(bi{2XR?0=*}?HtczIOsB zV?Vi-NgEw_h_J`$sWTg840|hJxfz@&QMy$?4+t3_3GIvxIlA44zRJFQkR(NW`d(u# z(HZ~e#@X4g_bO*@91#dvMBpT)lttOQ7)IyYy{V*BfqrKDZr#~0wJuuVmn7OaSb{qs zs`>+nc3)>CLC~z%pBrsPFn!TTMiffrJ|#rDtB>2e9G{r~s(JnHm&nfL%Rt;2V4W|U zYD(Q<$o3L!U$C7Qo+>Ts5`*i5-dj?cZRGDcI6U&Si|<#` zr~<>bOl?t!;WwUW&ueqUoGCC&u_NJ;v;SnojnXiT-hjetj^_N_5Z|(Ze_U9vVw|vCU=o$6v(?S1CcZYkR z$@aRKnohGv8Q$_bC^;rsNdz2e6#jd|J0MX8zR%F2D=pKh&KA-04}9zzzH+=V4Gr++ zqa7%vm3)y>bRFDP5>AL%P+X5T^HM^|Rjqz5REcDKm0IQnC|{;~dP&uTTz~>K-PF8y!Hmg6Zkl$cw03|2pm+;-a;bAfli+ z&-iUS`7V!Y6g#(wva_F9+^k}xOv-2#Ni1}cK2>3+b{}O8o=vo@QQ7fU);-F`2`^ez zJTxA8Xr2u0oz`_J5wM^0v|XKf*3}By`(=0}%GoLvZX7@e^j}`+FJW~p4_`+7-`op< zZ;lk$vMIGij23O*6{bCy?Y~>7a+!dlTe!jT^(E9!00f6f?`s?8l&}q84bx=#^lALC zkhgzwsjay`v*QNB+qP_e4Eb14DWlb;AqG7O!n#D@W0x z6qF%kXj}47Hu{&drkfWUIZKF{_fn9h&MHd~0Exj*MXAo_)*ZO#-`470~ zidlpg0N%7mJC2w)fEJ!PxKdkY2vS~t!UR8f1?`rbbcbGM#KHAp{Vncervc0a&c{=o zctu%Rrz}V&?1-IMs#pxCQyEOUGf0=Hs<;z6I3Lwjt(+Q_@rBPg%G=olwJ$-U z0r{)fb+>}CEI-aLrO0s3&BH^9eI8+2~I4z9-ZE@mM;^ zdQ__V12IU*3Dqs1CxbHG>alvUuot9QbwOwG=Cz$i<;MDk;anv>y_pKoLQ@M0v0rh0 yqfml!vBS0FfZA^RTvj0s;vvF6)Hy|dAPY2Pg4{1K_(ia8pTApJ#MvjcIq}{n62c4s diff --git a/vendor/cloud.google.com/go/license_test.go b/vendor/cloud.google.com/go/license_test.go index 4b87878a026..cff256a47d9 100644 --- a/vendor/cloud.google.com/go/license_test.go +++ b/vendor/cloud.google.com/go/license_test.go @@ -52,7 +52,7 @@ func TestLicense(t *testing.T) { if err != nil { return nil } - src = src[:140] // Ensure all of the sentinel values are at the top of the file. + src = src[:120] // Ensure all of the sentinel values are at the top of the file. // Find license for _, sentinel := range sentinels { diff --git a/vendor/cloud.google.com/go/old-news.md b/vendor/cloud.google.com/go/old-news.md deleted file mode 100644 index 7b3ee44489c..00000000000 --- a/vendor/cloud.google.com/go/old-news.md +++ /dev/null @@ -1,451 +0,0 @@ -_March 17, 2017_ - -Breaking Pubsub changes. -* Publish is now asynchronous -([announcement](https://groups.google.com/d/topic/google-api-go-announce/aaqRDIQ3rvU/discussion)). -* Subscription.Pull replaced by Subscription.Receive, which takes a callback ([announcement](https://groups.google.com/d/topic/google-api-go-announce/8pt6oetAdKc/discussion)). -* Message.Done replaced with Message.Ack and Message.Nack. - -_February 14, 2017_ - -Release of a client library for Spanner. See -the -[blog post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html). - -Note that although the Spanner service is beta, the Go client library is alpha. - -_December 12, 2016_ - -Beta release of BigQuery, DataStore, Logging and Storage. See the -[blog post](https://cloudplatform.googleblog.com/2016/12/announcing-new-google-cloud-client.html). - -Also, BigQuery now supports structs. Read a row directly into a struct with -`RowIterator.Next`, and upload a row directly from a struct with `Uploader.Put`. -You can also use field tags. See the [package documentation][cloud-bigquery-ref] -for details. - -_December 5, 2016_ - -More changes to BigQuery: - -* The `ValueList` type was removed. 
It is no longer necessary. Instead of - ```go - var v ValueList - ... it.Next(&v) .. - ``` - use - - ```go - var v []Value - ... it.Next(&v) ... - ``` - -* Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or - `ValueList` would append to the slice. Now each call resets the size to zero first. - -* Schema inference will infer the SQL type BYTES for a struct field of - type []byte. Previously it inferred STRING. - -* The types `uint`, `uint64` and `uintptr` are no longer supported in schema - inference. BigQuery's integer type is INT64, and those types may hold values - that are not correctly represented in a 64-bit signed integer. - -* The SQL types DATE, TIME and DATETIME are now supported. They correspond to - the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil` - package. - -_November 17, 2016_ - -Change to BigQuery: values from INTEGER columns will now be returned as int64, -not int. This will avoid errors arising from large values on 32-bit systems. - -_November 8, 2016_ - -New datastore feature: datastore now encodes your nested Go structs as Entity values, -instead of a flattened list of the embedded struct's fields. -This means that you may now have twice-nested slices, eg. -```go -type State struct { - Cities []struct{ - Populations []int - } -} -``` - -See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for -more details. - -_November 8, 2016_ - -Breaking changes to datastore: contexts no longer hold namespaces; instead you -must set a key's namespace explicitly. Also, key functions have been changed -and renamed. - -* The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method: - ```go - q := datastore.NewQuery("Kind").Namespace("ns") - ``` - -* All the fields of Key are exported. That means you can construct any Key with a struct literal: - ```go - k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"} - ``` - -* As a result of the above, the Key methods Kind, ID, d.Name, Parent, SetParent and Namespace have been removed. - -* `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace - ```go - NewIncompleteKey(ctx, kind, parent) - ``` - with - ```go - IncompleteKey(kind, parent) - ``` - and if you do use namespaces, make sure you set the namespace on the returned key. - -* `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace - ```go - NewKey(ctx, kind, name, 0, parent) - NewKey(ctx, kind, "", id, parent) - ``` - with - ```go - NameKey(kind, name, parent) - IDKey(kind, id, parent) - ``` - and if you do use namespaces, make sure you set the namespace on the returned key. - -* The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`. - -* The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection. - -See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for -more details. - -_October 27, 2016_ - -Breaking change to bigquery: `NewGCSReference` is now a function, -not a method on `Client`. - -New bigquery feature: `Table.LoaderFrom` now accepts a `ReaderSource`, enabling -loading data into a table from a file or any `io.Reader`. - -_October 21, 2016_ - -Breaking change to pubsub: removed `pubsub.Done`. - -Use `iterator.Done` instead, where `iterator` is the package -`google.golang.org/api/iterator`. 
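The pubsub note above describes swapping the removed `pubsub.Done` sentinel for `iterator.Done` from `google.golang.org/api/iterator`. A minimal sketch of that consumption loop, assuming the post-change `Subscription.Pull` iterator API the note refers to (not the older API vendored by this patch), is:

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/pubsub"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := pubsub.NewClient(ctx, "project-id")
	if err != nil {
		log.Fatal(err)
	}
	it, err := client.Subscription("subscription1").Pull(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer it.Stop()
	for {
		msg, err := it.Next()
		// iterator.Done replaces the removed pubsub.Done as the
		// end-of-stream sentinel.
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s\n", msg.Data)
		msg.Done(true) // acknowledge the message
	}
}
```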
- -_October 19, 2016_ - -Breaking changes to cloud.google.com/go/bigquery: - -* Client.Table and Client.OpenTable have been removed. - Replace - ```go - client.OpenTable("project", "dataset", "table") - ``` - with - ```go - client.DatasetInProject("project", "dataset").Table("table") - ``` - -* Client.CreateTable has been removed. - Replace - ```go - client.CreateTable(ctx, "project", "dataset", "table") - ``` - with - ```go - client.DatasetInProject("project", "dataset").Table("table").Create(ctx) - ``` - -* Dataset.ListTables have been replaced with Dataset.Tables. - Replace - ```go - tables, err := ds.ListTables(ctx) - ``` - with - ```go - it := ds.Tables(ctx) - for { - table, err := it.Next() - if err == iterator.Done { - break - } - if err != nil { - // TODO: Handle error. - } - // TODO: use table. - } - ``` - -* Client.Read has been replaced with Job.Read, Table.Read and Query.Read. - Replace - ```go - it, err := client.Read(ctx, job) - ``` - with - ```go - it, err := job.Read(ctx) - ``` - and similarly for reading from tables or queries. - -* The iterator returned from the Read methods is now named RowIterator. Its - behavior is closer to the other iterators in these libraries. It no longer - supports the Schema method; see the next item. - Replace - ```go - for it.Next(ctx) { - var vals ValueList - if err := it.Get(&vals); err != nil { - // TODO: Handle error. - } - // TODO: use vals. - } - if err := it.Err(); err != nil { - // TODO: Handle error. - } - ``` - with - ``` - for { - var vals ValueList - err := it.Next(&vals) - if err == iterator.Done { - break - } - if err != nil { - // TODO: Handle error. - } - // TODO: use vals. - } - ``` - Instead of the `RecordsPerRequest(n)` option, write - ```go - it.PageInfo().MaxSize = n - ``` - Instead of the `StartIndex(i)` option, write - ```go - it.StartIndex = i - ``` - -* ValueLoader.Load now takes a Schema in addition to a slice of Values. - Replace - ```go - func (vl *myValueLoader) Load(v []bigquery.Value) - ``` - with - ```go - func (vl *myValueLoader) Load(v []bigquery.Value, s bigquery.Schema) - ``` - - -* Table.Patch is replace by Table.Update. - Replace - ```go - p := table.Patch() - p.Description("new description") - metadata, err := p.Apply(ctx) - ``` - with - ```go - metadata, err := table.Update(ctx, bigquery.TableMetadataToUpdate{ - Description: "new description", - }) - ``` - -* Client.Copy is replaced by separate methods for each of its four functions. - All options have been replaced by struct fields. - - * To load data from Google Cloud Storage into a table, use Table.LoaderFrom. - - Replace - ```go - client.Copy(ctx, table, gcsRef) - ``` - with - ```go - table.LoaderFrom(gcsRef).Run(ctx) - ``` - Instead of passing options to Copy, set fields on the Loader: - ```go - loader := table.LoaderFrom(gcsRef) - loader.WriteDisposition = bigquery.WriteTruncate - ``` - - * To extract data from a table into Google Cloud Storage, use - Table.ExtractorTo. Set fields on the returned Extractor instead of - passing options. - - Replace - ```go - client.Copy(ctx, gcsRef, table) - ``` - with - ```go - table.ExtractorTo(gcsRef).Run(ctx) - ``` - - * To copy data into a table from one or more other tables, use - Table.CopierFrom. Set fields on the returned Copier instead of passing options. - - Replace - ```go - client.Copy(ctx, dstTable, srcTable) - ``` - with - ```go - dst.Table.CopierFrom(srcTable).Run(ctx) - ``` - - * To start a query job, create a Query and call its Run method. Set fields - on the query instead of passing options. 
- - Replace - ```go - client.Copy(ctx, table, query) - ``` - with - ```go - query.Run(ctx) - ``` - -* Table.NewUploader has been renamed to Table.Uploader. Instead of options, - configure an Uploader by setting its fields. - Replace - ```go - u := table.NewUploader(bigquery.UploadIgnoreUnknownValues()) - ``` - with - ```go - u := table.NewUploader(bigquery.UploadIgnoreUnknownValues()) - u.IgnoreUnknownValues = true - ``` - -_October 10, 2016_ - -Breaking changes to cloud.google.com/go/storage: - -* AdminClient replaced by methods on Client. - Replace - ```go - adminClient.CreateBucket(ctx, bucketName, attrs) - ``` - with - ```go - client.Bucket(bucketName).Create(ctx, projectID, attrs) - ``` - -* BucketHandle.List replaced by BucketHandle.Objects. - Replace - ```go - for query != nil { - objs, err := bucket.List(d.ctx, query) - if err != nil { ... } - query = objs.Next - for _, obj := range objs.Results { - fmt.Println(obj) - } - } - ``` - with - ```go - iter := bucket.Objects(d.ctx, query) - for { - obj, err := iter.Next() - if err == iterator.Done { - break - } - if err != nil { ... } - fmt.Println(obj) - } - ``` - (The `iterator` package is at `google.golang.org/api/iterator`.) - - Replace `Query.Cursor` with `ObjectIterator.PageInfo().Token`. - - Replace `Query.MaxResults` with `ObjectIterator.PageInfo().MaxSize`. - - -* ObjectHandle.CopyTo replaced by ObjectHandle.CopierFrom. - Replace - ```go - attrs, err := src.CopyTo(ctx, dst, nil) - ``` - with - ```go - attrs, err := dst.CopierFrom(src).Run(ctx) - ``` - - Replace - ```go - attrs, err := src.CopyTo(ctx, dst, &storage.ObjectAttrs{ContextType: "text/html"}) - ``` - with - ```go - c := dst.CopierFrom(src) - c.ContextType = "text/html" - attrs, err := c.Run(ctx) - ``` - -* ObjectHandle.ComposeFrom replaced by ObjectHandle.ComposerFrom. - Replace - ```go - attrs, err := dst.ComposeFrom(ctx, []*storage.ObjectHandle{src1, src2}, nil) - ``` - with - ```go - attrs, err := dst.ComposerFrom(src1, src2).Run(ctx) - ``` - -* ObjectHandle.Update's ObjectAttrs argument replaced by ObjectAttrsToUpdate. - Replace - ```go - attrs, err := obj.Update(ctx, &storage.ObjectAttrs{ContextType: "text/html"}) - ``` - with - ```go - attrs, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{ContextType: "text/html"}) - ``` - -* ObjectHandle.WithConditions replaced by ObjectHandle.If. - Replace - ```go - obj.WithConditions(storage.Generation(gen), storage.IfMetaGenerationMatch(mgen)) - ``` - with - ```go - obj.Generation(gen).If(storage.Conditions{MetagenerationMatch: mgen}) - ``` - - Replace - ```go - obj.WithConditions(storage.IfGenerationMatch(0)) - ``` - with - ```go - obj.If(storage.Conditions{DoesNotExist: true}) - ``` - -* `storage.Done` replaced by `iterator.Done` (from package `google.golang.org/api/iterator`). - -_October 6, 2016_ - -Package preview/logging deleted. Use logging instead. - -_September 27, 2016_ - -Logging client replaced with preview version (see below). - -_September 8, 2016_ - -* New clients for some of Google's Machine Learning APIs: Vision, Speech, and -Natural Language. - -* Preview version of a new [Stackdriver Logging][cloud-logging] client in -[`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging). -This client uses gRPC as its transport layer, and supports log reading, sinks -and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly. 
- diff --git a/vendor/cloud.google.com/go/run-tests.sh b/vendor/cloud.google.com/go/run-tests.sh deleted file mode 100755 index f47ff50a5e9..00000000000 --- a/vendor/cloud.google.com/go/run-tests.sh +++ /dev/null @@ -1,88 +0,0 @@ -#!/bin/bash - -# Selectively run tests for this repo, based on what has changed -# in a commit. Runs short tests for the whole repo, and full tests -# for changed directories. - -set -e - -prefix=cloud.google.com/go - -dryrun=false -if [[ $1 == "-n" ]]; then - dryrun=true - shift -fi - -if [[ $1 == "" ]]; then - echo >&2 "usage: $0 [-n] COMMIT" - exit 1 -fi - -# Files or directories that cause all tests to run if modified. -declare -A run_all -run_all=([.travis.yml]=1 [run-tests.sh]=1) - -function run { - if $dryrun; then - echo $* - else - (set -x; $*) - fi -} - - -# Find all the packages that have changed in this commit. -declare -A changed_packages - -for f in $(git diff-tree --no-commit-id --name-only -r $1); do - if [[ ${run_all[$f]} == 1 ]]; then - # This change requires a full test. Do it and exit. - run go test -race -v $prefix/... - exit - fi - # Map, e.g., "spanner/client.go" to "$prefix/spanner". - d=$(dirname $f) - if [[ $d == "." ]]; then - pkg=$prefix - else - pkg=$prefix/$d - fi - changed_packages[$pkg]=1 -done - -echo "changed packages: ${!changed_packages[*]}" - - -# Reports whether its argument, a package name, depends (recursively) -# on a changed package. -function depends_on_changed_package { - # According to go list, a package does not depend on itself, so - # we test that separately. - if [[ ${changed_packages[$1]} == 1 ]]; then - return 0 - fi - for dep in $(go list -f '{{range .Deps}}{{.}} {{end}}' $1); do - if [[ ${changed_packages[$dep]} == 1 ]]; then - return 0 - fi - done - return 1 -} - -# Collect the packages into two separate lists. (It is faster go test a list of -# packages than to individually go test each one.) - -shorts= -fulls= -for pkg in $(go list $prefix/...); do # for each package in the repo - if depends_on_changed_package $pkg; then # if it depends on a changed package - fulls="$fulls $pkg" # run the full test - else # otherwise - shorts="$shorts $pkg" # run the short test - fi -done -run go test -race -v -short $shorts -if [[ $fulls != "" ]]; then - run go test -race -v $fulls -fi diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml index c3af3ce27cd..2d8c086617e 100644 --- a/vendor/github.com/Sirupsen/logrus/.travis.yml +++ b/vendor/github.com/Sirupsen/logrus/.travis.yml @@ -2,9 +2,7 @@ language: go go: - 1.2 - 1.3 + - 1.4 - tip install: - - go get github.com/stretchr/testify - - go get github.com/stvp/go-udp-testing - - go get github.com/tobi/airbrake-go - - go get github.com/getsentry/raven-go + - go get -t ./... 
diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md new file mode 100644 index 00000000000..eb72bff93b7 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,7 @@ +# 0.7.3 + +formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md index b6aa84c987f..d55f9092479 100644 --- a/vendor/github.com/Sirupsen/logrus/README.md +++ b/vendor/github.com/Sirupsen/logrus/README.md @@ -1,10 +1,11 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) +# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc] Logrus is a structured logger for Go (golang), completely API compatible with the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0), the core API is unlikely change much but please version -control your Logrus to make sure you aren't fetching latest `master` on every -build.** +yet stable (pre 1.0). Logrus itself is completely stable and has been used in +many large deployments. The core API is unlikely to change much but please +version control your Logrus to make sure you aren't fetching latest `master` on +every build.** Nicely color-coded in development (when a TTY is attached, otherwise just plain text): @@ -33,14 +34,16 @@ ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not attached, the output is compatible with the -[l2met](http://r.32k.io/l2met-introduction) format: +[logfmt](http://godoc.org/github.com/kr/logfmt) format: ```text -time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10 -time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122 -time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10 -time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9 -time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100 +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +exit status 1 ``` #### Example @@ -81,7 +84,7 @@ func init() { // Use the Airbrake hook to report errors that have Error severity or above to // an exception tracker. You can create custom hooks, see the Hooks section. 
- log.AddHook(&logrus_airbrake.AirbrakeHook{}) + log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) // Output to stderr instead of stdout, could also be a file. log.SetOutput(os.Stderr) @@ -105,6 +108,16 @@ func main() { "omg": true, "number": 100, }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") } ``` @@ -163,43 +176,8 @@ You can add hooks for logging levels. For example to send errors to an exception tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to multiple places simultaneously, e.g. syslog. -```go -// Not the real implementation of the Airbrake hook. Just a simple sample. -import ( - log "github.com/Sirupsen/logrus" -) - -func init() { - log.AddHook(new(AirbrakeHook)) -} - -type AirbrakeHook struct{} - -// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains -// the fields for the entry. See the Fields section of the README. -func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error { - err := airbrake.Notify(entry.Data["error"].(error)) - if err != nil { - log.WithFields(log.Fields{ - "source": "airbrake", - "endpoint": airbrake.Endpoint, - }).Info("Failed to send error to Airbrake") - } - - return nil -} - -// `Levels()` returns a slice of `Levels` the hook is fired for. -func (hook *AirbrakeHook) Levels() []log.Level { - return []log.Level{ - log.ErrorLevel, - log.FatalLevel, - log.PanicLevel, - } -} -``` - -Logrus comes with built-in hooks. Add those, or your custom hook, in `init`: +Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in +`init`: ```go import ( @@ -210,7 +188,7 @@ import ( ) func init() { - log.AddHook(new(logrus_airbrake.AirbrakeHook)) + log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") if err != nil { @@ -221,22 +199,18 @@ func init() { } ``` -* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) - Send errors to an exception tracking service compatible with the Airbrake API. - Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. -* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) - Send errors to the Papertrail hosted logging service via UDP. - -* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) - Send errors to remote syslog server. - Uses standard library `log/syslog` behind the scenes. - -* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus) - Send errors to a channel in hipchat. - -* [`github.com/sebest/logrusly`](https://github.com/sebest/logrusly) - Send logs to Loggly (https://www.loggly.com/) +| Hook | Description | +| ----- | ----------- | +| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. 
| +| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. | +| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | +| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | +| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | +| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | +| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | +| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | +| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) | #### Level logging @@ -314,10 +288,15 @@ The built-in logging formatters are: field to `true`. To force no colored output even if there is a TTY set the `DisableColors` field to `true` * `logrus.JSONFormatter`. Logs fields as JSON. +* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net). + + ```go + logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: “application_name"}) + ``` Third party logging formatters: -* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. +* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. You can define your formatter by implementing the `Formatter` interface, requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a @@ -342,10 +321,28 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { } ``` +#### Logger as an `io.Writer` + +Logrus can be transormed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. + ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + #### Rotation Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotated(8)`) that can compress and delete old log +external program (like `logrotate(8)`) that can compress and delete old log entries. It should not be a feature of the application-level logger. diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go index e164eecb5f3..17fe6f707bc 100644 --- a/vendor/github.com/Sirupsen/logrus/entry.go +++ b/vendor/github.com/Sirupsen/logrus/entry.go @@ -126,6 +126,10 @@ func (entry *Entry) Warn(args ...interface{}) { } } +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) 
+} + func (entry *Entry) Error(args ...interface{}) { if entry.Logger.Level >= ErrorLevel { entry.log(ErrorLevel, fmt.Sprint(args...)) diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go index d0871244814..a67e1b802d9 100644 --- a/vendor/github.com/Sirupsen/logrus/exported.go +++ b/vendor/github.com/Sirupsen/logrus/exported.go @@ -9,6 +9,10 @@ var ( std = New() ) +func StandardLogger() *Logger { + return std +} + // SetOutput sets the standard logger output. func SetOutput(out io.Writer) { std.mu.Lock() @@ -32,6 +36,8 @@ func SetLevel(level Level) { // GetLevel returns the standard logger level. func GetLevel() Level { + std.mu.Lock() + defer std.mu.Unlock() return std.Level } diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go index 038ce9fd297..104d689f187 100644 --- a/vendor/github.com/Sirupsen/logrus/formatter.go +++ b/vendor/github.com/Sirupsen/logrus/formatter.go @@ -1,5 +1,9 @@ package logrus +import "time" + +const DefaultTimestampFormat = time.RFC3339 + // The Formatter interface is used to implement a custom Formatter. It takes an // `Entry`. It exposes all the fields, including the default ones: // diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go index b09227c2b5d..dcc4f1d9fd7 100644 --- a/vendor/github.com/Sirupsen/logrus/json_formatter.go +++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go @@ -3,18 +3,32 @@ package logrus import ( "encoding/json" "fmt" - "time" ) -type JSONFormatter struct{} +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string +} func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { data := make(Fields, len(entry.Data)+3) for k, v := range entry.Data { - data[k] = v + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/Sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } } prefixFieldClashes(data) - data["time"] = entry.Time.Format(time.RFC3339) + + if f.TimestampFormat == "" { + f.TimestampFormat = DefaultTimestampFormat + } + + data["time"] = entry.Time.Format(f.TimestampFormat) data["msg"] = entry.Message data["level"] = entry.Level.String() diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter_test.go b/vendor/github.com/Sirupsen/logrus/json_formatter_test.go new file mode 100644 index 00000000000..1d70873254d --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/json_formatter_test.go @@ -0,0 +1,120 @@ +package logrus + +import ( + "encoding/json" + "errors" + + "testing" +) + +func TestErrorNotLost(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["error"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + 
+ if entry["omg"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestFieldClashWithTime(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("time", "right now!")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.time"] != "right now!" { + t.Fatal("fields.time not set to original time field") + } + + if entry["time"] != "0001-01-01T00:00:00Z" { + t.Fatal("time field not set to current time, was: ", entry["time"]) + } +} + +func TestFieldClashWithMsg(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("msg", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.msg"] != "something" { + t.Fatal("fields.msg not set to original msg field") + } +} + +func TestFieldClashWithLevel(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.level"] != "something" { + t.Fatal("fields.level not set to original level field") + } +} + +func TestJSONEntryEndsWithNewline(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + if b[len(b)-1] != '\n' { + t.Fatal("Expected JSON log entry to end with a newline") + } +} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go index b392e547a7b..da928a37509 100644 --- a/vendor/github.com/Sirupsen/logrus/logger.go +++ b/vendor/github.com/Sirupsen/logrus/logger.go @@ -65,11 +65,15 @@ func (logger *Logger) WithFields(fields Fields) *Entry { } func (logger *Logger) Debugf(format string, args ...interface{}) { - NewEntry(logger).Debugf(format, args...) + if logger.Level >= DebugLevel { + NewEntry(logger).Debugf(format, args...) + } } func (logger *Logger) Infof(format string, args ...interface{}) { - NewEntry(logger).Infof(format, args...) + if logger.Level >= InfoLevel { + NewEntry(logger).Infof(format, args...) + } } func (logger *Logger) Printf(format string, args ...interface{}) { @@ -77,31 +81,45 @@ func (logger *Logger) Printf(format string, args ...interface{}) { } func (logger *Logger) Warnf(format string, args ...interface{}) { - NewEntry(logger).Warnf(format, args...) + if logger.Level >= WarnLevel { + NewEntry(logger).Warnf(format, args...) + } } func (logger *Logger) Warningf(format string, args ...interface{}) { - NewEntry(logger).Warnf(format, args...) + if logger.Level >= WarnLevel { + NewEntry(logger).Warnf(format, args...) + } } func (logger *Logger) Errorf(format string, args ...interface{}) { - NewEntry(logger).Errorf(format, args...) + if logger.Level >= ErrorLevel { + NewEntry(logger).Errorf(format, args...) + } } func (logger *Logger) Fatalf(format string, args ...interface{}) { - NewEntry(logger).Fatalf(format, args...) + if logger.Level >= FatalLevel { + NewEntry(logger).Fatalf(format, args...) 
+ } } func (logger *Logger) Panicf(format string, args ...interface{}) { - NewEntry(logger).Panicf(format, args...) + if logger.Level >= PanicLevel { + NewEntry(logger).Panicf(format, args...) + } } func (logger *Logger) Debug(args ...interface{}) { - NewEntry(logger).Debug(args...) + if logger.Level >= DebugLevel { + NewEntry(logger).Debug(args...) + } } func (logger *Logger) Info(args ...interface{}) { - NewEntry(logger).Info(args...) + if logger.Level >= InfoLevel { + NewEntry(logger).Info(args...) + } } func (logger *Logger) Print(args ...interface{}) { @@ -109,31 +127,45 @@ func (logger *Logger) Print(args ...interface{}) { } func (logger *Logger) Warn(args ...interface{}) { - NewEntry(logger).Warn(args...) + if logger.Level >= WarnLevel { + NewEntry(logger).Warn(args...) + } } func (logger *Logger) Warning(args ...interface{}) { - NewEntry(logger).Warn(args...) + if logger.Level >= WarnLevel { + NewEntry(logger).Warn(args...) + } } func (logger *Logger) Error(args ...interface{}) { - NewEntry(logger).Error(args...) + if logger.Level >= ErrorLevel { + NewEntry(logger).Error(args...) + } } func (logger *Logger) Fatal(args ...interface{}) { - NewEntry(logger).Fatal(args...) + if logger.Level >= FatalLevel { + NewEntry(logger).Fatal(args...) + } } func (logger *Logger) Panic(args ...interface{}) { - NewEntry(logger).Panic(args...) + if logger.Level >= PanicLevel { + NewEntry(logger).Panic(args...) + } } func (logger *Logger) Debugln(args ...interface{}) { - NewEntry(logger).Debugln(args...) + if logger.Level >= DebugLevel { + NewEntry(logger).Debugln(args...) + } } func (logger *Logger) Infoln(args ...interface{}) { - NewEntry(logger).Infoln(args...) + if logger.Level >= InfoLevel { + NewEntry(logger).Infoln(args...) + } } func (logger *Logger) Println(args ...interface{}) { @@ -141,21 +173,31 @@ func (logger *Logger) Println(args ...interface{}) { } func (logger *Logger) Warnln(args ...interface{}) { - NewEntry(logger).Warnln(args...) + if logger.Level >= WarnLevel { + NewEntry(logger).Warnln(args...) + } } func (logger *Logger) Warningln(args ...interface{}) { - NewEntry(logger).Warnln(args...) + if logger.Level >= WarnLevel { + NewEntry(logger).Warnln(args...) + } } func (logger *Logger) Errorln(args ...interface{}) { - NewEntry(logger).Errorln(args...) + if logger.Level >= ErrorLevel { + NewEntry(logger).Errorln(args...) + } } func (logger *Logger) Fatalln(args ...interface{}) { - NewEntry(logger).Fatalln(args...) + if logger.Level >= FatalLevel { + NewEntry(logger).Fatalln(args...) + } } func (logger *Logger) Panicln(args ...interface{}) { - NewEntry(logger).Panicln(args...) + if logger.Level >= PanicLevel { + NewEntry(logger).Panicln(args...) 
+ } } diff --git a/vendor/github.com/Sirupsen/logrus/logrus_test.go b/vendor/github.com/Sirupsen/logrus/logrus_test.go index 53025425816..efaacea2361 100644 --- a/vendor/github.com/Sirupsen/logrus/logrus_test.go +++ b/vendor/github.com/Sirupsen/logrus/logrus_test.go @@ -5,6 +5,7 @@ import ( "encoding/json" "strconv" "strings" + "sync" "testing" "github.com/stretchr/testify/assert" @@ -190,7 +191,7 @@ func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { log.WithField("level", 1).Info("test") }, func(fields Fields) { assert.Equal(t, fields["level"], "info") - assert.Equal(t, fields["fields.level"], 1) + assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only }) } @@ -223,7 +224,7 @@ func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { err := json.Unmarshal(buffer.Bytes(), &fields) assert.NoError(t, err, "should have decoded first message") - assert.Len(t, fields, 4, "should only have msg/time/level/context fields") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") assert.Equal(t, fields["msg"], "looks delicious") assert.Equal(t, fields["context"], "eating raw fish") @@ -233,7 +234,7 @@ func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { err = json.Unmarshal(buffer.Bytes(), &fields) assert.NoError(t, err, "should have decoded second message") - assert.Len(t, fields, 4, "should only have msg/time/level/context fields") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") assert.Equal(t, fields["msg"], "omg it is!") assert.Equal(t, fields["context"], "eating raw fish") assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") @@ -281,3 +282,20 @@ func TestParseLevel(t *testing.T) { l, err = ParseLevel("invalid") assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) } + +func TestGetSetLevelRace(t *testing.T) { + wg := sync.WaitGroup{} + for i := 0; i < 100; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + if i%2 == 0 { + SetLevel(InfoLevel) + } else { + GetLevel() + } + }(i) + + } + wg.Wait() +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go index 276447bd5c0..b8bebc13eea 100644 --- a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go +++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -3,7 +3,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build linux,!appengine darwin freebsd +// +build linux darwin freebsd openbsd package logrus diff --git a/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go b/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go new file mode 100644 index 00000000000..af609a53d64 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go @@ -0,0 +1,7 @@ +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go index 78e78893568..612417ff9c8 100644 --- a/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -3,7 +3,6 @@ package logrus import ( "bytes" "fmt" - "regexp" "sort" "strings" "time" @@ -15,12 +14,12 @@ const ( green = 32 yellow = 33 blue = 34 + gray = 37 ) var ( baseTimestamp time.Time isTerminal bool - noQuoteNeeded *regexp.Regexp ) func init() { @@ -34,20 +33,37 @@ func miniTS() int { type TextFormatter struct { // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool + ForceColors bool + + // Force disabling colors. DisableColors bool - // Set to true to disable timestamp logging (useful when the output - // is redirected to a logging system already adding a timestamp) + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. 
+ DisableSorting bool } func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - - var keys []string + var keys []string = make([]string, 0, len(entry.Data)) for k := range entry.Data { keys = append(keys, k) } - sort.Strings(keys) + + if !f.DisableSorting { + sort.Strings(keys) + } b := &bytes.Buffer{} @@ -55,11 +71,14 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { isColored := (f.ForceColors || isTerminal) && !f.DisableColors + if f.TimestampFormat == "" { + f.TimestampFormat = DefaultTimestampFormat + } if isColored { - printColored(b, entry, keys) + f.printColored(b, entry, keys) } else { if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339)) + f.appendKeyValue(b, "time", entry.Time.Format(f.TimestampFormat)) } f.appendKeyValue(b, "level", entry.Level.String()) f.appendKeyValue(b, "msg", entry.Message) @@ -72,9 +91,11 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { return b.Bytes(), nil } -func printColored(b *bytes.Buffer, entry *Entry, keys []string) { +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string) { var levelColor int switch entry.Level { + case DebugLevel: + levelColor = gray case WarnLevel: levelColor = yellow case ErrorLevel, FatalLevel, PanicLevel: @@ -85,7 +106,11 @@ func printColored(b *bytes.Buffer, entry *Entry, keys []string) { levelText := strings.ToUpper(entry.Level.String())[0:4] - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) + if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message) + } for _, k := range keys { v := entry.Data[k] fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v) @@ -96,7 +121,7 @@ func needsQuoting(text string) bool { for _, ch := range text { if !((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch < '9') || + (ch >= '0' && ch <= '9') || ch == '-' || ch == '.') { return false } diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter_test.go b/vendor/github.com/Sirupsen/logrus/text_formatter_test.go index f604f1b0026..e25a44f67bf 100644 --- a/vendor/github.com/Sirupsen/logrus/text_formatter_test.go +++ b/vendor/github.com/Sirupsen/logrus/text_formatter_test.go @@ -3,8 +3,8 @@ package logrus import ( "bytes" "errors" - "testing" + "time" ) func TestQuoting(t *testing.T) { @@ -25,9 +25,37 @@ func TestQuoting(t *testing.T) { checkQuoting(false, "abcd") checkQuoting(false, "v1.0") + checkQuoting(false, "1234567890") checkQuoting(true, "/foobar") checkQuoting(true, "x y") checkQuoting(true, "x,y") checkQuoting(false, errors.New("invalid")) checkQuoting(true, errors.New("invalid argument")) } + +func TestTimestampFormat(t *testing.T) { + checkTimeStr := func(format string) { + customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} + customStr, _ := customFormatter.Format(WithField("test", "test")) + timeStart := bytes.Index(customStr, ([]byte)("time=")) + timeEnd := bytes.Index(customStr, ([]byte)("level=")) + timeStr := customStr[timeStart+5 : timeEnd-1] + if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' { + timeStr = timeStr[1 : len(timeStr)-1] + } + if format == "" { + format = time.RFC3339 + } + _, e := time.Parse(format, (string)(timeStr)) + if e != nil { + t.Errorf("time string \"%s\" did not match provided time format \"%s\": 
%s", timeStr, format, e) + } + } + + checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") + checkTimeStr("Mon Jan _2 15:04:05 2006") + checkTimeStr("") +} + +// TODO add tests for sorting etc., this requires a parser for the text +// formatter output. diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go new file mode 100644 index 00000000000..1e30b1c753a --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/writer.go @@ -0,0 +1,31 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +func (logger *Logger) Writer() *io.PipeWriter { + reader, writer := io.Pipe() + + go logger.writerScanner(reader) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (logger *Logger) writerScanner(reader *io.PipeReader) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + logger.Print(scanner.Text()) + } + if err := scanner.Err(); err != nil { + logger.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md index ef243177f8a..9e194f1ff71 100644 --- a/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md @@ -1,3 +1,43 @@ +Release v1.10.51 (2017-09-22) +=== + +### Service Client Updates +* `service/config`: Updates service API and documentation +* `service/ecs`: Updates service API and documentation + * Amazon ECS users can now add and drop Linux capabilities to their containers through the use of docker's cap-add and cap-drop features. Customers can specify the capabilities they wish to add or drop for each container in their task definition. +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/rds`: Updates service documentation + * Documentation updates for rds + +Release v1.10.50 (2017-09-21) +=== + +### Service Client Updates +* `service/budgets`: Updates service API + * Including "DuplicateRecordException" in UpdateNotification and UpdateSubscriber. +* `service/ec2`: Updates service API and documentation + * Add EC2 APIs to copy Amazon FPGA Images (AFIs) within the same region and across multiple regions, delete AFIs, and modify AFI attributes. AFI attributes include name, description and granting/denying other AWS accounts to load the AFI. +* `service/logs`: Updates service API and documentation + * Adds support for associating LogGroups with KMS Keys. + +### SDK Bugs +* Fix greengrass service model being duplicated with different casing. ([#1541](https://github.com/aws/aws-sdk-go/pull/1541)) + * Fixes [#1540](https://github.com/aws/aws-sdk-go/issues/1540) + * Fixes [#1539](https://github.com/aws/aws-sdk-go/issues/1539) +Release v1.10.49 (2017-09-20) +=== + +### Service Client Updates +* `service/Greengrass`: Adds new service +* `service/appstream`: Updates service API and documentation + * API updates for supporting On-Demand fleets. +* `service/codepipeline`: Updates service API and documentation + * This change includes a PipelineMetadata object that is part of the output from the GetPipeline API that includes the Pipeline ARN, created, and updated timestamp. +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/rds`: Updates service API and documentation + * Introduces the --option-group-name parameter to the ModifyDBSnapshot CLI command. You can specify this parameter when you upgrade an Oracle DB snapshot. 
The same option group considerations apply when upgrading a DB snapshot as when upgrading a DB instance. For more information, see http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Oracle.html#USER_UpgradeDBInstance.Oracle.OGPG.OG +* `service/runtime.lex`: Updates service API and documentation + Release v1.10.48 (2017-09-19) === diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 56fefcfc6aa..771c122d467 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -1023,6 +1023,8 @@ var awsPartition = partition{ Endpoints: endpoints{ "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "greengrass": service{ @@ -1031,6 +1033,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "us-east-1": endpoint{}, @@ -1545,6 +1548,7 @@ var awsPartition = partition{ "snowball": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, @@ -1825,6 +1829,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 403f55605d8..66802ad327b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.10.48" +const SDKVersion = "1.10.51" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index ccb14369a90..28c0a1e2e37 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -2306,6 +2306,80 @@ func (c *EC2) ConfirmProductInstanceWithContext(ctx aws.Context, input *ConfirmP return out, req.Send() } +const opCopyFpgaImage = "CopyFpgaImage" + +// CopyFpgaImageRequest generates a "aws/request.Request" representing the +// client's request for the CopyFpgaImage operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CopyFpgaImage for more information on using the CopyFpgaImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CopyFpgaImageRequest method. 
+// req, resp := client.CopyFpgaImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyFpgaImage +func (c *EC2) CopyFpgaImageRequest(input *CopyFpgaImageInput) (req *request.Request, output *CopyFpgaImageOutput) { + op := &request.Operation{ + Name: opCopyFpgaImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyFpgaImageInput{} + } + + output = &CopyFpgaImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// CopyFpgaImage API operation for Amazon Elastic Compute Cloud. +// +// Copies the specified Amazon FPGA Image (AFI) to the current region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CopyFpgaImage for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyFpgaImage +func (c *EC2) CopyFpgaImage(input *CopyFpgaImageInput) (*CopyFpgaImageOutput, error) { + req, out := c.CopyFpgaImageRequest(input) + return out, req.Send() +} + +// CopyFpgaImageWithContext is the same as CopyFpgaImage with the addition of +// the ability to pass a context and additional request options. +// +// See CopyFpgaImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CopyFpgaImageWithContext(ctx aws.Context, input *CopyFpgaImageInput, opts ...request.Option) (*CopyFpgaImageOutput, error) { + req, out := c.CopyFpgaImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCopyImage = "CopyImage" // CopyImageRequest generates a "aws/request.Request" representing the @@ -5466,6 +5540,80 @@ func (c *EC2) DeleteFlowLogsWithContext(ctx aws.Context, input *DeleteFlowLogsIn return out, req.Send() } +const opDeleteFpgaImage = "DeleteFpgaImage" + +// DeleteFpgaImageRequest generates a "aws/request.Request" representing the +// client's request for the DeleteFpgaImage operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteFpgaImage for more information on using the DeleteFpgaImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteFpgaImageRequest method. 
+// req, resp := client.DeleteFpgaImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFpgaImage +func (c *EC2) DeleteFpgaImageRequest(input *DeleteFpgaImageInput) (req *request.Request, output *DeleteFpgaImageOutput) { + op := &request.Operation{ + Name: opDeleteFpgaImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteFpgaImageInput{} + } + + output = &DeleteFpgaImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteFpgaImage API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified Amazon FPGA Image (AFI). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteFpgaImage for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFpgaImage +func (c *EC2) DeleteFpgaImage(input *DeleteFpgaImageInput) (*DeleteFpgaImageOutput, error) { + req, out := c.DeleteFpgaImageRequest(input) + return out, req.Send() +} + +// DeleteFpgaImageWithContext is the same as DeleteFpgaImage with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteFpgaImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteFpgaImageWithContext(ctx aws.Context, input *DeleteFpgaImageInput, opts ...request.Option) (*DeleteFpgaImageOutput, error) { + req, out := c.DeleteFpgaImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteInternetGateway = "DeleteInternetGateway" // DeleteInternetGatewayRequest generates a "aws/request.Request" representing the @@ -8219,6 +8367,80 @@ func (c *EC2) DescribeFlowLogsWithContext(ctx aws.Context, input *DescribeFlowLo return out, req.Send() } +const opDescribeFpgaImageAttribute = "DescribeFpgaImageAttribute" + +// DescribeFpgaImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFpgaImageAttribute operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFpgaImageAttribute for more information on using the DescribeFpgaImageAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFpgaImageAttributeRequest method. 
+// req, resp := client.DescribeFpgaImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImageAttribute +func (c *EC2) DescribeFpgaImageAttributeRequest(input *DescribeFpgaImageAttributeInput) (req *request.Request, output *DescribeFpgaImageAttributeOutput) { + op := &request.Operation{ + Name: opDescribeFpgaImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFpgaImageAttributeInput{} + } + + output = &DescribeFpgaImageAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFpgaImageAttribute API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified attribute of the specified Amazon FPGA Image (AFI). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeFpgaImageAttribute for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImageAttribute +func (c *EC2) DescribeFpgaImageAttribute(input *DescribeFpgaImageAttributeInput) (*DescribeFpgaImageAttributeOutput, error) { + req, out := c.DescribeFpgaImageAttributeRequest(input) + return out, req.Send() +} + +// DescribeFpgaImageAttributeWithContext is the same as DescribeFpgaImageAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFpgaImageAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeFpgaImageAttributeWithContext(ctx aws.Context, input *DescribeFpgaImageAttributeInput, opts ...request.Option) (*DescribeFpgaImageAttributeOutput, error) { + req, out := c.DescribeFpgaImageAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeFpgaImages = "DescribeFpgaImages" // DescribeFpgaImagesRequest generates a "aws/request.Request" representing the @@ -15093,20 +15315,24 @@ func (c *EC2) GetPasswordDataRequest(input *GetPasswordDataInput) (req *request. // GetPasswordData API operation for Amazon Elastic Compute Cloud. // -// Retrieves the encrypted administrator password for an instance running Windows. +// Retrieves the encrypted administrator password for a running Windows instance. // -// The Windows password is generated at boot if the EC2Config service plugin, -// Ec2SetPassword, is enabled. This usually only happens the first time an AMI -// is launched, and then Ec2SetPassword is automatically disabled. The password -// is not generated for rebundled AMIs unless Ec2SetPassword is enabled before -// bundling. +// The Windows password is generated at boot by the EC2Config service or EC2Launch +// scripts (Windows Server 2016 and later). This usually only happens the first +// time an instance is launched. 
For more information, see EC2Config (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/UsingConfig_WinAMI.html) +// and EC2Launch (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2launch.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For the EC2Config service, the password is not generated for rebundled AMIs +// unless Ec2SetPassword is enabled before bundling. // // The password is encrypted using the key pair that you specified when you // launched the instance. You must provide the corresponding key pair file. // -// Password generation and encryption takes a few moments. We recommend that -// you wait up to 15 minutes after launching an instance before trying to retrieve -// the generated password. +// When you launch an instance, password generation and encryption may take +// a few minutes. If you try to retrieve the password before it's available, +// the output returns an empty string. We recommend that you wait up to 15 minutes +// after launching an instance before trying to retrieve the generated password. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -15602,6 +15828,80 @@ func (c *EC2) ImportVolumeWithContext(ctx aws.Context, input *ImportVolumeInput, return out, req.Send() } +const opModifyFpgaImageAttribute = "ModifyFpgaImageAttribute" + +// ModifyFpgaImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyFpgaImageAttribute operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyFpgaImageAttribute for more information on using the ModifyFpgaImageAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyFpgaImageAttributeRequest method. +// req, resp := client.ModifyFpgaImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFpgaImageAttribute +func (c *EC2) ModifyFpgaImageAttributeRequest(input *ModifyFpgaImageAttributeInput) (req *request.Request, output *ModifyFpgaImageAttributeOutput) { + op := &request.Operation{ + Name: opModifyFpgaImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyFpgaImageAttributeInput{} + } + + output = &ModifyFpgaImageAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyFpgaImageAttribute API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified attribute of the specified Amazon FPGA Image (AFI). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyFpgaImageAttribute for usage and error information. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFpgaImageAttribute +func (c *EC2) ModifyFpgaImageAttribute(input *ModifyFpgaImageAttributeInput) (*ModifyFpgaImageAttributeOutput, error) { + req, out := c.ModifyFpgaImageAttributeRequest(input) + return out, req.Send() +} + +// ModifyFpgaImageAttributeWithContext is the same as ModifyFpgaImageAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyFpgaImageAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyFpgaImageAttributeWithContext(ctx aws.Context, input *ModifyFpgaImageAttributeInput, opts ...request.Option) (*ModifyFpgaImageAttributeOutput, error) { + req, out := c.ModifyFpgaImageAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opModifyHosts = "ModifyHosts" // ModifyHostsRequest generates a "aws/request.Request" representing the @@ -18465,6 +18765,81 @@ func (c *EC2) RequestSpotInstancesWithContext(ctx aws.Context, input *RequestSpo return out, req.Send() } +const opResetFpgaImageAttribute = "ResetFpgaImageAttribute" + +// ResetFpgaImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ResetFpgaImageAttribute operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResetFpgaImageAttribute for more information on using the ResetFpgaImageAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ResetFpgaImageAttributeRequest method. +// req, resp := client.ResetFpgaImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetFpgaImageAttribute +func (c *EC2) ResetFpgaImageAttributeRequest(input *ResetFpgaImageAttributeInput) (req *request.Request, output *ResetFpgaImageAttributeOutput) { + op := &request.Operation{ + Name: opResetFpgaImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetFpgaImageAttributeInput{} + } + + output = &ResetFpgaImageAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// ResetFpgaImageAttribute API operation for Amazon Elastic Compute Cloud. +// +// Resets the specified attribute of the specified Amazon FPGA Image (AFI) to +// its default value. You can only reset the load permission attribute. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ResetFpgaImageAttribute for usage and error information. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetFpgaImageAttribute +func (c *EC2) ResetFpgaImageAttribute(input *ResetFpgaImageAttributeInput) (*ResetFpgaImageAttributeOutput, error) { + req, out := c.ResetFpgaImageAttributeRequest(input) + return out, req.Send() +} + +// ResetFpgaImageAttributeWithContext is the same as ResetFpgaImageAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ResetFpgaImageAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ResetFpgaImageAttributeWithContext(ctx aws.Context, input *ResetFpgaImageAttributeInput, opts ...request.Option) (*ResetFpgaImageAttributeOutput, error) { + req, out := c.ResetFpgaImageAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opResetImageAttribute = "ResetImageAttribute" // ResetImageAttributeRequest generates a "aws/request.Request" representing the @@ -23511,6 +23886,123 @@ func (s *ConversionTask) SetTags(v []*Tag) *ConversionTask { return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyFpgaImageRequest +type CopyFpgaImageInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // The description for the new AFI. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The name for the new AFI. The default is the name of the source AFI. + Name *string `type:"string"` + + // The ID of the source AFI. + // + // SourceFpgaImageId is a required field + SourceFpgaImageId *string `type:"string" required:"true"` + + // The region that contains the source AFI. + // + // SourceRegion is a required field + SourceRegion *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyFpgaImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyFpgaImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyFpgaImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyFpgaImageInput"} + if s.SourceFpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("SourceFpgaImageId")) + } + if s.SourceRegion == nil { + invalidParams.Add(request.NewErrParamRequired("SourceRegion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CopyFpgaImageInput) SetClientToken(v string) *CopyFpgaImageInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. 
+func (s *CopyFpgaImageInput) SetDescription(v string) *CopyFpgaImageInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CopyFpgaImageInput) SetDryRun(v bool) *CopyFpgaImageInput { + s.DryRun = &v + return s +} + +// SetName sets the Name field's value. +func (s *CopyFpgaImageInput) SetName(v string) *CopyFpgaImageInput { + s.Name = &v + return s +} + +// SetSourceFpgaImageId sets the SourceFpgaImageId field's value. +func (s *CopyFpgaImageInput) SetSourceFpgaImageId(v string) *CopyFpgaImageInput { + s.SourceFpgaImageId = &v + return s +} + +// SetSourceRegion sets the SourceRegion field's value. +func (s *CopyFpgaImageInput) SetSourceRegion(v string) *CopyFpgaImageInput { + s.SourceRegion = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyFpgaImageResult +type CopyFpgaImageOutput struct { + _ struct{} `type:"structure"` + + // The ID of the new AFI. + FpgaImageId *string `locationName:"fpgaImageId" type:"string"` +} + +// String returns the string representation +func (s CopyFpgaImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyFpgaImageOutput) GoString() string { + return s.String() +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *CopyFpgaImageOutput) SetFpgaImageId(v string) *CopyFpgaImageOutput { + s.FpgaImageId = &v + return s +} + // Contains the parameters for CopyImage. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyImageRequest type CopyImageInput struct { @@ -27471,6 +27963,81 @@ func (s *DeleteFlowLogsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *DeleteFlo return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFpgaImageRequest +type DeleteFpgaImageInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AFI. + // + // FpgaImageId is a required field + FpgaImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteFpgaImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFpgaImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteFpgaImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFpgaImageInput"} + if s.FpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("FpgaImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteFpgaImageInput) SetDryRun(v bool) *DeleteFpgaImageInput { + s.DryRun = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *DeleteFpgaImageInput) SetFpgaImageId(v string) *DeleteFpgaImageInput { + s.FpgaImageId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFpgaImageResult +type DeleteFpgaImageOutput struct { + _ struct{} `type:"structure"` + + // Is true if the request succeeds, and an error otherwise. 
+ Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DeleteFpgaImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFpgaImageOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *DeleteFpgaImageOutput) SetReturn(v bool) *DeleteFpgaImageOutput { + s.Return = &v + return s +} + // Contains the parameters for DeleteInternetGateway. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteInternetGatewayRequest type DeleteInternetGatewayInput struct { @@ -30174,6 +30741,95 @@ func (s *DescribeFlowLogsOutput) SetNextToken(v string) *DescribeFlowLogsOutput return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImageAttributeRequest +type DescribeFpgaImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The AFI attribute. + // + // Attribute is a required field + Attribute *string `type:"string" required:"true" enum:"FpgaImageAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AFI. + // + // FpgaImageId is a required field + FpgaImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeFpgaImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFpgaImageAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFpgaImageAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFpgaImageAttributeInput"} + if s.Attribute == nil { + invalidParams.Add(request.NewErrParamRequired("Attribute")) + } + if s.FpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("FpgaImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *DescribeFpgaImageAttributeInput) SetAttribute(v string) *DescribeFpgaImageAttributeInput { + s.Attribute = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeFpgaImageAttributeInput) SetDryRun(v bool) *DescribeFpgaImageAttributeInput { + s.DryRun = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *DescribeFpgaImageAttributeInput) SetFpgaImageId(v string) *DescribeFpgaImageAttributeInput { + s.FpgaImageId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImageAttributeResult +type DescribeFpgaImageAttributeOutput struct { + _ struct{} `type:"structure"` + + // Information about the attribute. + FpgaImageAttribute *FpgaImageAttribute `locationName:"fpgaImageAttribute" type:"structure"` +} + +// String returns the string representation +func (s DescribeFpgaImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFpgaImageAttributeOutput) GoString() string { + return s.String() +} + +// SetFpgaImageAttribute sets the FpgaImageAttribute field's value. 
+func (s *DescribeFpgaImageAttributeOutput) SetFpgaImageAttribute(v *FpgaImageAttribute) *DescribeFpgaImageAttributeOutput { + s.FpgaImageAttribute = v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImagesRequest type DescribeFpgaImagesInput struct { _ struct{} `type:"structure"` @@ -39763,6 +40419,9 @@ type FpgaImage struct { // The product codes for the AFI. ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + // Indicates whether the AFI is public. + Public *bool `locationName:"public" type:"boolean"` + // The version of the AWS Shell that was used to create the bitstream. ShellVersion *string `locationName:"shellVersion" type:"string"` @@ -39840,6 +40499,12 @@ func (s *FpgaImage) SetProductCodes(v []*ProductCode) *FpgaImage { return s } +// SetPublic sets the Public field's value. +func (s *FpgaImage) SetPublic(v bool) *FpgaImage { + s.Public = &v + return s +} + // SetShellVersion sets the ShellVersion field's value. func (s *FpgaImage) SetShellVersion(v string) *FpgaImage { s.ShellVersion = &v @@ -39864,6 +40529,67 @@ func (s *FpgaImage) SetUpdateTime(v time.Time) *FpgaImage { return s } +// Describes an Amazon FPGA image (AFI) attribute. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/FpgaImageAttribute +type FpgaImageAttribute struct { + _ struct{} `type:"structure"` + + // The description of the AFI. + Description *string `locationName:"description" type:"string"` + + // The ID of the AFI. + FpgaImageId *string `locationName:"fpgaImageId" type:"string"` + + // One or more load permissions. + LoadPermissions []*LoadPermission `locationName:"loadPermissions" locationNameList:"item" type:"list"` + + // The name of the AFI. + Name *string `locationName:"name" type:"string"` + + // One or more product codes. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s FpgaImageAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FpgaImageAttribute) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *FpgaImageAttribute) SetDescription(v string) *FpgaImageAttribute { + s.Description = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *FpgaImageAttribute) SetFpgaImageId(v string) *FpgaImageAttribute { + s.FpgaImageId = &v + return s +} + +// SetLoadPermissions sets the LoadPermissions field's value. +func (s *FpgaImageAttribute) SetLoadPermissions(v []*LoadPermission) *FpgaImageAttribute { + s.LoadPermissions = v + return s +} + +// SetName sets the Name field's value. +func (s *FpgaImageAttribute) SetName(v string) *FpgaImageAttribute { + s.Name = &v + return s +} + +// SetProductCodes sets the ProductCodes field's value. +func (s *FpgaImageAttribute) SetProductCodes(v []*ProductCode) *FpgaImageAttribute { + s.ProductCodes = v + return s +} + // Describes the state of the bitstream generation process for an Amazon FPGA // image (AFI). // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/FpgaImageState @@ -40266,7 +40992,8 @@ type GetPasswordDataOutput struct { // The ID of the Windows instance. InstanceId *string `locationName:"instanceId" type:"string"` - // The password of the instance. + // The password of the instance. Returns an empty string if the password is + // not available. 
PasswordData *string `locationName:"passwordData" type:"string"` // The time the data was last updated. @@ -44641,6 +45368,259 @@ func (s *LaunchSpecification) SetUserData(v string) *LaunchSpecification { return s } +// Describes a load permission. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LoadPermission +type LoadPermission struct { + _ struct{} `type:"structure"` + + // The name of the group. + Group *string `locationName:"group" type:"string" enum:"PermissionGroup"` + + // The AWS account ID. + UserId *string `locationName:"userId" type:"string"` +} + +// String returns the string representation +func (s LoadPermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadPermission) GoString() string { + return s.String() +} + +// SetGroup sets the Group field's value. +func (s *LoadPermission) SetGroup(v string) *LoadPermission { + s.Group = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *LoadPermission) SetUserId(v string) *LoadPermission { + s.UserId = &v + return s +} + +// Describes modifications to the load permissions of an Amazon FPGA image (AFI). +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LoadPermissionModifications +type LoadPermissionModifications struct { + _ struct{} `type:"structure"` + + // The load permissions to add. + Add []*LoadPermissionRequest `locationNameList:"item" type:"list"` + + // The load permissions to remove. + Remove []*LoadPermissionRequest `locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s LoadPermissionModifications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadPermissionModifications) GoString() string { + return s.String() +} + +// SetAdd sets the Add field's value. +func (s *LoadPermissionModifications) SetAdd(v []*LoadPermissionRequest) *LoadPermissionModifications { + s.Add = v + return s +} + +// SetRemove sets the Remove field's value. +func (s *LoadPermissionModifications) SetRemove(v []*LoadPermissionRequest) *LoadPermissionModifications { + s.Remove = v + return s +} + +// Describes a load permission. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LoadPermissionRequest +type LoadPermissionRequest struct { + _ struct{} `type:"structure"` + + // The name of the group. + Group *string `type:"string" enum:"PermissionGroup"` + + // The AWS account ID. + UserId *string `type:"string"` +} + +// String returns the string representation +func (s LoadPermissionRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadPermissionRequest) GoString() string { + return s.String() +} + +// SetGroup sets the Group field's value. +func (s *LoadPermissionRequest) SetGroup(v string) *LoadPermissionRequest { + s.Group = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *LoadPermissionRequest) SetUserId(v string) *LoadPermissionRequest { + s.UserId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFpgaImageAttributeRequest +type ModifyFpgaImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + Attribute *string `type:"string" enum:"FpgaImageAttributeName"` + + // A description for the AFI. 
+ Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AFI. + // + // FpgaImageId is a required field + FpgaImageId *string `type:"string" required:"true"` + + // The load permission for the AFI. + LoadPermission *LoadPermissionModifications `type:"structure"` + + // A name for the AFI. + Name *string `type:"string"` + + // The operation type. + OperationType *string `type:"string" enum:"OperationType"` + + // One or more product codes. After you add a product code to an AFI, it can't + // be removed. This parameter is valid only when modifying the productCodes + // attribute. + ProductCodes []*string `locationName:"ProductCode" locationNameList:"ProductCode" type:"list"` + + // One or more user groups. This parameter is valid only when modifying the + // loadPermission attribute. + UserGroups []*string `locationName:"UserGroup" locationNameList:"UserGroup" type:"list"` + + // One or more AWS account IDs. This parameter is valid only when modifying + // the loadPermission attribute. + UserIds []*string `locationName:"UserId" locationNameList:"UserId" type:"list"` +} + +// String returns the string representation +func (s ModifyFpgaImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyFpgaImageAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyFpgaImageAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyFpgaImageAttributeInput"} + if s.FpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("FpgaImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *ModifyFpgaImageAttributeInput) SetAttribute(v string) *ModifyFpgaImageAttributeInput { + s.Attribute = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ModifyFpgaImageAttributeInput) SetDescription(v string) *ModifyFpgaImageAttributeInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyFpgaImageAttributeInput) SetDryRun(v bool) *ModifyFpgaImageAttributeInput { + s.DryRun = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *ModifyFpgaImageAttributeInput) SetFpgaImageId(v string) *ModifyFpgaImageAttributeInput { + s.FpgaImageId = &v + return s +} + +// SetLoadPermission sets the LoadPermission field's value. +func (s *ModifyFpgaImageAttributeInput) SetLoadPermission(v *LoadPermissionModifications) *ModifyFpgaImageAttributeInput { + s.LoadPermission = v + return s +} + +// SetName sets the Name field's value. +func (s *ModifyFpgaImageAttributeInput) SetName(v string) *ModifyFpgaImageAttributeInput { + s.Name = &v + return s +} + +// SetOperationType sets the OperationType field's value. +func (s *ModifyFpgaImageAttributeInput) SetOperationType(v string) *ModifyFpgaImageAttributeInput { + s.OperationType = &v + return s +} + +// SetProductCodes sets the ProductCodes field's value. 
+func (s *ModifyFpgaImageAttributeInput) SetProductCodes(v []*string) *ModifyFpgaImageAttributeInput { + s.ProductCodes = v + return s +} + +// SetUserGroups sets the UserGroups field's value. +func (s *ModifyFpgaImageAttributeInput) SetUserGroups(v []*string) *ModifyFpgaImageAttributeInput { + s.UserGroups = v + return s +} + +// SetUserIds sets the UserIds field's value. +func (s *ModifyFpgaImageAttributeInput) SetUserIds(v []*string) *ModifyFpgaImageAttributeInput { + s.UserIds = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFpgaImageAttributeResult +type ModifyFpgaImageAttributeOutput struct { + _ struct{} `type:"structure"` + + // Information about the attribute. + FpgaImageAttribute *FpgaImageAttribute `locationName:"fpgaImageAttribute" type:"structure"` +} + +// String returns the string representation +func (s ModifyFpgaImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyFpgaImageAttributeOutput) GoString() string { + return s.String() +} + +// SetFpgaImageAttribute sets the FpgaImageAttribute field's value. +func (s *ModifyFpgaImageAttributeOutput) SetFpgaImageAttribute(v *FpgaImageAttribute) *ModifyFpgaImageAttributeOutput { + s.FpgaImageAttribute = v + return s +} + // Contains the parameters for ModifyHosts. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyHostsRequest type ModifyHostsInput struct { @@ -51359,6 +52339,90 @@ func (s *ReservedInstancesOffering) SetUsagePrice(v float64) *ReservedInstancesO return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetFpgaImageAttributeRequest +type ResetFpgaImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The attribute. + Attribute *string `type:"string" enum:"ResetFpgaImageAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AFI. + // + // FpgaImageId is a required field + FpgaImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ResetFpgaImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetFpgaImageAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResetFpgaImageAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResetFpgaImageAttributeInput"} + if s.FpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("FpgaImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *ResetFpgaImageAttributeInput) SetAttribute(v string) *ResetFpgaImageAttributeInput { + s.Attribute = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ResetFpgaImageAttributeInput) SetDryRun(v bool) *ResetFpgaImageAttributeInput { + s.DryRun = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. 
+func (s *ResetFpgaImageAttributeInput) SetFpgaImageId(v string) *ResetFpgaImageAttributeInput { + s.FpgaImageId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetFpgaImageAttributeResult +type ResetFpgaImageAttributeOutput struct { + _ struct{} `type:"structure"` + + // Is true if the request succeeds, and an error otherwise. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ResetFpgaImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetFpgaImageAttributeOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *ResetFpgaImageAttributeOutput) SetReturn(v bool) *ResetFpgaImageAttributeOutput { + s.Return = &v + return s +} + // Contains the parameters for ResetImageAttribute. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetImageAttributeRequest type ResetImageAttributeInput struct { @@ -59132,6 +60196,20 @@ const ( FlowLogsResourceTypeNetworkInterface = "NetworkInterface" ) +const ( + // FpgaImageAttributeNameDescription is a FpgaImageAttributeName enum value + FpgaImageAttributeNameDescription = "description" + + // FpgaImageAttributeNameName is a FpgaImageAttributeName enum value + FpgaImageAttributeNameName = "name" + + // FpgaImageAttributeNameLoadPermission is a FpgaImageAttributeName enum value + FpgaImageAttributeNameLoadPermission = "loadPermission" + + // FpgaImageAttributeNameProductCodes is a FpgaImageAttributeName enum value + FpgaImageAttributeNameProductCodes = "productCodes" +) + const ( // FpgaImageStateCodePending is a FpgaImageStateCode enum value FpgaImageStateCodePending = "pending" @@ -59864,6 +60942,11 @@ const ( ReservedInstanceStateRetired = "retired" ) +const ( + // ResetFpgaImageAttributeNameLoadPermission is a ResetFpgaImageAttributeName enum value + ResetFpgaImageAttributeNameLoadPermission = "loadPermission" +) + const ( // ResetImageAttributeNameLaunchPermission is a ResetImageAttributeName enum value ResetImageAttributeNameLaunchPermission = "launchPermission" diff --git a/vendor/github.com/davecgh/go-spew/.travis.yml b/vendor/github.com/davecgh/go-spew/.travis.yml index 10f469a2579..984e0736e7d 100644 --- a/vendor/github.com/davecgh/go-spew/.travis.yml +++ b/vendor/github.com/davecgh/go-spew/.travis.yml @@ -1,9 +1,12 @@ language: go -go: 1.2 +go: + - 1.5.4 + - 1.6.3 + - 1.7 install: - - go get -v code.google.com/p/go.tools/cmd/cover + - go get -v golang.org/x/tools/cmd/cover script: - - go test -v -tags=disableunsafe ./spew + - go test -v -tags=safe ./spew - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov after_success: - go get -v github.com/mattn/goveralls diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE index 2a7cfd2bf6a..bb67332310b 100644 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -1,3 +1,5 @@ +ISC License + Copyright (c) 2012-2013 Dave Collins Permission to use, copy, modify, and distribute this software for any diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md index 777a8e1d43d..556170ae633 100644 --- a/vendor/github.com/davecgh/go-spew/README.md +++ b/vendor/github.com/davecgh/go-spew/README.md @@ -15,7 +15,7 @@ open source or commercial projects. 
If you're interested in reading about how this package came to life and some of the challenges involved in providing a deep pretty printer, there is a blog post about it -[here](https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/). +[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/). ## Documentation @@ -157,7 +157,7 @@ options. See the ConfigState documentation for more details. which only accept pointer receivers from non-pointer variables. This option relies on access to the unsafe package, so it will not have any effect when running in environments without access to the unsafe package such as Google - App Engine or with the "disableunsafe" build tag specified. + App Engine or with the "safe" build tag specified. Pointer method invocation is enabled by default. * ContinueOnMethod @@ -185,9 +185,9 @@ options. See the ConfigState documentation for more details. This package relies on the unsafe package to perform some of the more advanced features, however it also supports a "limited" mode which allows it to work in environments where the unsafe package is not available. By default, it will -operate in this mode on Google App Engine. The "disableunsafe" build tag may -also be specified to force the package to build without using the unsafe -package. +operate in this mode on Google App Engine and when compiled with GopherJS. The +"safe" build tag may also be specified to force the package to build without +using the unsafe package. ## License diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go index 565bf5899f2..d42a0bc4afc 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -13,9 +13,10 @@ // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. // NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine and "-tags disableunsafe" -// is not added to the go build command line. -// +build !appengine,!disableunsafe +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build !js,!appengine,!safe,!disableunsafe package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go index 457e41235ed..e47a4e79513 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -13,9 +13,10 @@ // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. // NOTE: Due to the following build constraints, this file will only be compiled -// when either the code is running on Google App Engine or "-tags disableunsafe" -// is added to the go build command line. -// +build appengine disableunsafe +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. 
+// +build js appengine safe disableunsafe package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go index ee1ab07b3fd..9db83c66fcf 100644 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -64,9 +64,18 @@ type ConfigState struct { // inside these interface methods. As a result, this option relies on // access to the unsafe package, so it will not have any effect when // running in environments without access to the unsafe package such as - // Google App Engine or with the "disableunsafe" build tag specified. + // Google App Engine or with the "safe" build tag specified. DisablePointerMethods bool + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + // ContinueOnMethod specifies whether or not recursion should continue once // a custom error or Stringer interface is invoked. The default, false, // means it will print the results of invoking the custom error or Stringer diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go index a0ff95e27e5..95f9dbfe50b 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -129,7 +129,7 @@ func (d *dumpState) dumpPtr(v reflect.Value) { d.w.Write(closeParenBytes) // Display pointer information. - if len(pointerChain) > 0 { + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { d.w.Write(openParenBytes) for i, addr := range pointerChain { if i > 0 { @@ -282,13 +282,13 @@ func (d *dumpState) dump(v reflect.Value) { case reflect.Map, reflect.String: valueLen = v.Len() } - if valueLen != 0 || valueCap != 0 { + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { d.w.Write(openParenBytes) if valueLen != 0 { d.w.Write(lenEqualsBytes) printInt(d.w, int64(valueLen), 10) } - if valueCap != 0 { + if !d.cs.DisableCapacities && valueCap != 0 { if valueLen != 0 { d.w.Write(spaceBytes) } diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go index 18a38358e05..ed3e3c31a3f 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go +++ b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go @@ -59,10 +59,11 @@ func addCgoDumpTests() { v3Len := fmt.Sprintf("%d", v3l) v3Cap := fmt.Sprintf("%d", v3c) v3t := "[6]testdata._Ctype_unsignedchar" + v3t2 := "[6]testdata._Ctype_uchar" v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " + "{\n 00000000 74 65 73 74 33 00 " + " |test3.|\n}" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(v3, "("+v3t+") "+v3s+"\n", "("+v3t2+") "+v3s+"\n") // C signed char array. v4, v4l, v4c := testdata.GetCgoSignedCharArray() diff --git a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go index 83e070e9a86..863b62cf569 100644 --- a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go +++ b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go @@ -13,9 +13,10 @@ // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine and "-tags disableunsafe" -// is not added to the go build command line. -// +build !appengine,!disableunsafe +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build !js,!appengine,!safe,!disableunsafe /* This test file is part of the spew package rather than than the spew_test diff --git a/vendor/github.com/davecgh/go-spew/spew/spew_test.go b/vendor/github.com/davecgh/go-spew/spew/spew_test.go index dbbc0856792..f1f6b716726 100644 --- a/vendor/github.com/davecgh/go-spew/spew/spew_test.go +++ b/vendor/github.com/davecgh/go-spew/spew/spew_test.go @@ -130,12 +130,19 @@ func initSpewTests() { scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true} scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1} scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true} + scsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true} + scsNoCap := &spew.ConfigState{DisableCapacities: true} // Variables for tests on types which implement Stringer interface with and // without a pointer receiver. ts := stringer("test") tps := pstringer("test") + type ptrTester struct { + s *struct{} + } + tptr := &ptrTester{s: &struct{}{}} + // depthTester is used to test max depth handling for structs, array, slices // and maps. type depthTester struct { @@ -192,6 +199,10 @@ func initSpewTests() { {scsContinue, fCSFprint, "", te, "(error: 10) 10"}, {scsContinue, fCSFdump, "", te, "(spew_test.customError) " + "(error: 10) 10\n"}, + {scsNoPtrAddr, fCSFprint, "", tptr, "<*>{<*>{}}"}, + {scsNoPtrAddr, fCSSdump, "", tptr, "(*spew_test.ptrTester)({\ns: (*struct {})({\n})\n})\n"}, + {scsNoCap, fCSSdump, "", make([]string, 0, 10), "([]string) {\n}\n"}, + {scsNoCap, fCSSdump, "", make([]string, 1, 10), "([]string) (len=1) {\n(string) \"\"\n}\n"}, } } diff --git a/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore index c5203bf6e73..7adca9439c5 100644 --- a/vendor/github.com/go-ini/ini/.gitignore +++ b/vendor/github.com/go-ini/ini/.gitignore @@ -2,4 +2,3 @@ testdata/conf_out.ini ini.sublime-project ini.sublime-workspace testdata/conf_reflect.ini -.idea diff --git a/vendor/github.com/go-ini/ini/.travis.yml b/vendor/github.com/go-ini/ini/.travis.yml deleted file mode 100644 index d9d1b8ffadb..00000000000 --- a/vendor/github.com/go-ini/ini/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -sudo: false -language: go -go: - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - master - -script: - - go get golang.org/x/tools/cmd/cover - - go get github.com/smartystreets/goconvey - - go test -v -cover -race diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile deleted file mode 100644 index ac034e5258f..00000000000 --- a/vendor/github.com/go-ini/ini/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -.PHONY: build test bench vet - -build: vet bench - -test: - go test -v -cover -race - -bench: - go test -v -cover -race -test.bench=. 
-test.benchmem - -vet: - go vet diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md index e67d51f3204..0652fb7339c 100644 --- a/vendor/github.com/go-ini/ini/README.md +++ b/vendor/github.com/go-ini/ini/README.md @@ -1,4 +1,4 @@ -INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://sourcegraph.com/github.com/go-ini/ini/-/badge.svg)](https://sourcegraph.com/github.com/go-ini/ini?badge) +ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini) === ![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) @@ -9,7 +9,7 @@ Package ini provides INI file read and write functionality in Go. ## Feature -- Load multiple data sources(`[]byte`, file and `io.ReadCloser`) with overwrites. +- Load multiple data sources(`[]byte` or file) with overwrites. - Read with recursion values. - Read with parent-child sections. - Read with auto-increment key names. @@ -22,32 +22,16 @@ Package ini provides INI file read and write functionality in Go. ## Installation -To use a tagged revision: - go get gopkg.in/ini.v1 -To use with latest changes: - - go get github.com/go-ini/ini - -Please add `-u` flag to update in the future. - -### Testing - -If you want to test on your machine, please apply `-t` flag: - - go get -t gopkg.in/ini.v1 - -Please add `-u` flag to update in the future. - ## Getting Started ### Loading from data sources -A **Data Source** is either raw data in type `[]byte`, a file name with type `string` or `io.ReadCloser`. You can load **as many data sources as you want**. Passing other types will simply return an error. +A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many as** data sources you want. Passing other types will simply return an error. ```go -cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) +cfg, err := ini.Load([]byte("raw data"), "filename") ``` Or start with an empty object: @@ -56,78 +40,12 @@ Or start with an empty object: cfg := ini.Empty() ``` -When you cannot decide how many data sources to load at the beginning, you will still be able to **Append()** them later. +When you cannot decide how many data sources to load at the beginning, you still able to **Append()** them later. ```go err := cfg.Append("other file", []byte("other raw data")) ``` -If you have a list of files with possibilities that some of them may not available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning error. - -```go -cfg, err := ini.LooseLoad("filename", "filename_404") -``` - -The cool thing is, whenever the file is available to load while you're calling `Reload` method, it will be counted as usual. - -#### Ignore cases of key name - -When you do not care about cases of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing. - -```go -cfg, err := ini.InsensitiveLoad("filename") -//... 
- -// sec1 and sec2 are the exactly same section object -sec1, err := cfg.GetSection("Section") -sec2, err := cfg.GetSection("SecTIOn") - -// key1 and key2 are the exactly same key object -key1, err := sec1.GetKey("Key") -key2, err := sec2.GetKey("KeY") -``` - -#### MySQL-like boolean key - -MySQL's configuration allows a key without value as follows: - -```ini -[mysqld] -... -skip-host-cache -skip-name-resolve -``` - -By default, this is considered as missing value. But if you know you're going to deal with those cases, you can assign advanced load options: - -```go -cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) -``` - -The value of those keys are always `true`, and when you save to a file, it will keep in the same foramt as you read. - -To generate such keys in your program, you could use `NewBooleanKey`: - -```go -key, err := sec.NewBooleanKey("skip-host-cache") -``` - -#### Comment - -Take care that following format will be treated as comment: - -1. Line begins with `#` or `;` -2. Words after `#` or `;` -3. Words after section name (i.e words after `[some section name]`) - -If you want to save a value with `#` or `;`, please quote them with ``` ` ``` or ``` """ ```. - -Alternatively, you can use following `LoadOptions` to completely ignore inline comments: - -```go -cfg, err := LoadSources(LoadOptions{IgnoreInlineComment: true}, "app.ini")) -``` - ### Working with sections To get a section, you would need to: @@ -145,7 +63,7 @@ section, err := cfg.GetSection("") When you're pretty sure the section exists, following code could make your life easier: ```go -section := cfg.Section("section name") +section := cfg.Section("") ``` What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you. @@ -199,7 +117,7 @@ names := cfg.Section("").KeyStrings() To get a clone hash of keys and corresponding values: ```go -hash := cfg.Section("").KeysHash() +hash := cfg.GetSection("").KeysHash() ``` ### Working with values @@ -237,8 +155,8 @@ To get value with types: ```go // For boolean values: -// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On -// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On +// false when value is: 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off v, err = cfg.Section("").Key("BOOL").Bool() v, err = cfg.Section("").Key("FLOAT64").Float64() v, err = cfg.Section("").Key("INT").Int() @@ -312,16 +230,6 @@ cfg.Section("advance").Key("two_lines").String() // how about continuation lines cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 ``` -Well, I hate continuation lines, how do I disable that? - -```go -cfg, err := ini.LoadSources(ini.LoadOptions{ - IgnoreContinuation: true, -}, "filename") -``` - -Holy crap! 
- Note that single quotes around values will be stripped: ```ini @@ -360,13 +268,9 @@ vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), min vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 ``` -##### Auto-split values into a slice - -To use zero value of type for invalid inputs: +To auto-split value into slice: ```go -// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] -// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] vals = cfg.Section("").Key("STRINGS").Strings(",") vals = cfg.Section("").Key("FLOAT64S").Float64s(",") vals = cfg.Section("").Key("INTS").Ints(",") @@ -376,32 +280,6 @@ vals = cfg.Section("").Key("UINT64S").Uint64s(",") vals = cfg.Section("").Key("TIMES").Times(",") ``` -To exclude invalid values out of result slice: - -```go -// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] -// Input: how, 2.2, are, you -> [2.2] -vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") -vals = cfg.Section("").Key("INTS").ValidInts(",") -vals = cfg.Section("").Key("INT64S").ValidInt64s(",") -vals = cfg.Section("").Key("UINTS").ValidUints(",") -vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") -vals = cfg.Section("").Key("TIMES").ValidTimes(",") -``` - -Or to return nothing but error when have invalid inputs: - -```go -// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] -// Input: how, 2.2, are, you -> error -vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") -vals = cfg.Section("").Key("INTS").StrictInts(",") -vals = cfg.Section("").Key("INT64S").StrictInt64s(",") -vals = cfg.Section("").Key("UINTS").StrictUints(",") -vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") -vals = cfg.Section("").Key("TIMES").StrictTimes(",") -``` - ### Save your configuration Finally, it's time to save your configuration to somewhere. @@ -422,12 +300,6 @@ cfg.WriteTo(writer) cfg.WriteToIndent(writer, "\t") ``` -By default, spaces are used to align "=" sign between key and values, to disable that: - -```go -ini.PrettyFormat = false -``` - ## Advanced Usage ### Recursive Values @@ -469,27 +341,6 @@ CLONE_URL = https://%(IMPORT_PATH)s cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 ``` -#### Retrieve parent keys available to a child section - -```go -cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] -``` - -### Unparseable Sections - -Sometimes, you have sections that do not contain key-value pairs but raw content, to handle such case, you can use `LoadOptions.UnparsableSections`: - -```go -cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] -<1> This slide has the fuel listed in the wrong units `)) - -body := cfg.Section("COMMENTS").Body() - -/* --- start --- -<1> This slide has the fuel listed in the wrong units ------- end --- */ -``` - ### Auto-increment Key Names If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter. @@ -574,8 +425,8 @@ Why not? 
```go type Embeded struct { Dates []time.Time `delim:"|"` - Places []string `ini:"places,omitempty"` - None []int `ini:",omitempty"` + Places []string + None []int } type Author struct { @@ -610,7 +461,8 @@ GPA = 2.8 [Embeded] Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 -places = HangZhou,Boston +Places = HangZhou,Boston +None = ``` #### Name Mapper @@ -630,7 +482,7 @@ type Info struct { } func main() { - err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini")) // ... cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) @@ -644,26 +496,6 @@ func main() { Same rules of name mapper apply to `ini.ReflectFromWithMapper` function. -#### Value Mapper - -To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values: - -```go -type Env struct { - Foo string `ini:"foo"` -} - -func main() { - cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n") - cfg.ValueMapper = os.ExpandEnv - // ... - env := &Env{} - err = cfg.Section("env").MapTo(env) -} -``` - -This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`. - #### Other Notes On Map/Reflect Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature: diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md index 0cf4194492c..27fdb32fee0 100644 --- a/vendor/github.com/go-ini/ini/README_ZH.md +++ b/vendor/github.com/go-ini/ini/README_ZH.md @@ -2,7 +2,7 @@ ## 功能特性 -- 支持覆盖加载多个数据源(`[]byte`、文件和 `io.ReadCloser`) +- 支持覆盖加载多个数据源(`[]byte` 或文件) - 支持递归读取键值 - 支持读取父子分区 - 支持读取自增键名 @@ -15,32 +15,16 @@ ## 下载安装 -使用一个特定版本: - go get gopkg.in/ini.v1 -使用最新版: - - go get github.com/go-ini/ini - -如需更新请添加 `-u` 选项。 - -### 测试安装 - -如果您想要在自己的机器上运行测试,请使用 `-t` 标记: - - go get -t gopkg.in/ini.v1 - -如需更新请添加 `-u` 选项。 - ## 开始使用 ### 从数据源加载 -一个 **数据源** 可以是 `[]byte` 类型的原始数据,`string` 类型的文件路径或 `io.ReadCloser`。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 +一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 ```go -cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) +cfg, err := ini.Load([]byte("raw data"), "filename") ``` 或者从一个空白的文件开始: @@ -55,72 +39,6 @@ cfg := ini.Empty() err := cfg.Append("other file", []byte("other raw data")) ``` -当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误): - -```go -cfg, err := ini.LooseLoad("filename", "filename_404") -``` - -更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。 - -#### 忽略键名的大小写 - -有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写: - -```go -cfg, err := ini.InsensitiveLoad("filename") -//... - -// sec1 和 sec2 指向同一个分区对象 -sec1, err := cfg.GetSection("Section") -sec2, err := cfg.GetSection("SecTIOn") - -// key1 和 key2 指向同一个键对象 -key1, err := sec1.GetKey("Key") -key2, err := sec2.GetKey("KeY") -``` - -#### 类似 MySQL 配置中的布尔值键 - -MySQL 的配置文件中会出现没有具体值的布尔类型的键: - -```ini -[mysqld] -... -skip-host-cache -skip-name-resolve -``` - -默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理: - -```go -cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) -``` - -这些键的值永远为 `true`,且在保存到文件时也只会输出键名。 - -如果您想要通过程序来生成此类键,则可以使用 `NewBooleanKey`: - -```go -key, err := sec.NewBooleanKey("skip-host-cache") -``` - -#### 关于注释 - -下述几种情况的内容将被视为注释: - -1. 所有以 `#` 或 `;` 开头的行 -2. 所有在 `#` 或 `;` 之后的内容 -3. 
分区标签后的文字 (即 `[分区名]` 之后的内容) - -如果你希望使用包含 `#` 或 `;` 的值,请使用 ``` ` ``` 或 ``` """ ``` 进行包覆。 - -除此之外,您还可以通过 `LoadOptions` 完全忽略行内注释: - -```go -cfg, err := LoadSources(LoadOptions{IgnoreInlineComment: true}, "app.ini")) -``` - ### 操作分区(Section) 获取指定分区: @@ -138,7 +56,7 @@ section, err := cfg.GetSection("") 当您非常确定某个分区是存在的,可以使用以下简便方法: ```go -section := cfg.Section("section name") +section := cfg.Section("") ``` 如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 @@ -192,7 +110,7 @@ names := cfg.Section("").KeyStrings() 获取分区下的所有键值对的克隆: ```go -hash := cfg.Section("").KeysHash() +hash := cfg.GetSection("").KeysHash() ``` ### 操作键值(Value) @@ -230,8 +148,8 @@ yes := cfg.Section("").HasValue("test value") ```go // 布尔值的规则: -// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On -// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off v, err = cfg.Section("").Key("BOOL").Bool() v, err = cfg.Section("").Key("FLOAT64").Float64() v, err = cfg.Section("").Key("INT").Int() @@ -305,16 +223,6 @@ cfg.Section("advance").Key("two_lines").String() // how about continuation lines cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 ``` -可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢? - -```go -cfg, err := ini.LoadSources(ini.LoadOptions{ - IgnoreContinuation: true, -}, "filename") -``` - -哇靠给力啊! - 需要注意的是,值两侧的单引号会被自动剔除: ```ini @@ -353,13 +261,9 @@ vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), min vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 ``` -##### 自动分割键值到切片(slice) - -当存在无效输入时,使用零值代替: +自动分割键值为切片(slice): ```go -// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] -// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] vals = cfg.Section("").Key("STRINGS").Strings(",") vals = cfg.Section("").Key("FLOAT64S").Float64s(",") vals = cfg.Section("").Key("INTS").Ints(",") @@ -369,32 +273,6 @@ vals = cfg.Section("").Key("UINT64S").Uint64s(",") vals = cfg.Section("").Key("TIMES").Times(",") ``` -从结果切片中剔除无效输入: - -```go -// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] -// Input: how, 2.2, are, you -> [2.2] -vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") -vals = cfg.Section("").Key("INTS").ValidInts(",") -vals = cfg.Section("").Key("INT64S").ValidInt64s(",") -vals = cfg.Section("").Key("UINTS").ValidUints(",") -vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") -vals = cfg.Section("").Key("TIMES").ValidTimes(",") -``` - -当存在无效输入时,直接返回错误: - -```go -// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] -// Input: how, 2.2, are, you -> error -vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") -vals = cfg.Section("").Key("INTS").StrictInts(",") -vals = cfg.Section("").Key("INT64S").StrictInt64s(",") -vals = cfg.Section("").Key("UINTS").StrictUints(",") -vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") -vals = cfg.Section("").Key("TIMES").StrictTimes(",") -``` - ### 保存配置 终于到了这个时刻,是时候保存一下配置了。 @@ -415,15 +293,9 @@ cfg.WriteTo(writer) cfg.WriteToIndent(writer, "\t") ``` -默认情况下,空格将被用于对齐键值之间的等号以美化输出结果,以下代码可以禁用该功能: - -```go -ini.PrettyFormat = false -``` - -## 高级用法 +### 高级用法 -### 递归读取键值 +#### 递归读取键值 在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 @@ -443,7 +315,7 @@ cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini ``` -### 
读取父子分区 +#### 读取父子分区 您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 @@ -462,28 +334,7 @@ CLONE_URL = https://%(IMPORT_PATH)s cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 ``` -#### 获取上级父分区下的所有键名 - -```go -cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] -``` - -### 无法解析的分区 - -如果遇到一些比较特殊的分区,它们不包含常见的键值对,而是没有固定格式的纯文本,则可以使用 `LoadOptions.UnparsableSections` 进行处理: - -```go -cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] -<1> This slide has the fuel listed in the wrong units `)) - -body := cfg.Section("COMMENTS").Body() - -/* --- start --- -<1> This slide has the fuel listed in the wrong units ------- end --- */ -``` - -### 读取自增键名 +#### 读取自增键名 如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 @@ -565,8 +416,8 @@ p := &Person{ ```go type Embeded struct { Dates []time.Time `delim:"|"` - Places []string `ini:"places,omitempty"` - None []int `ini:",omitempty"` + Places []string + None []int } type Author struct { @@ -601,7 +452,8 @@ GPA = 2.8 [Embeded] Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 -places = HangZhou,Boston +Places = HangZhou,Boston +None = ``` #### 名称映射器(Name Mapper) @@ -621,7 +473,7 @@ type Info struct{ } func main() { - err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini")) // ... cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) @@ -635,26 +487,6 @@ func main() { 使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。 -#### 值映射器(Value Mapper) - -值映射器允许使用一个自定义函数自动展开值的具体内容,例如:运行时获取环境变量: - -```go -type Env struct { - Foo string `ini:"foo"` -} - -func main() { - cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n") - cfg.ValueMapper = os.ExpandEnv - // ... - env := &Env{} - err = cfg.Section("env").MapTo(env) -} -``` - -本例中,`env.Foo` 将会是运行时所获取到环境变量 `MY_VAR` 的值。 - #### 映射/反射的其它说明 任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联: diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go deleted file mode 100644 index 80afe743158..00000000000 --- a/vendor/github.com/go-ini/ini/error.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "fmt" -) - -type ErrDelimiterNotFound struct { - Line string -} - -func IsErrDelimiterNotFound(err error) bool { - _, ok := err.(ErrDelimiterNotFound) - return ok -} - -func (err ErrDelimiterNotFound) Error() string { - return fmt.Sprintf("key-value delimiter not found: %s", err.Line) -} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go index 7f3c4d1ed1f..8b09ad3644f 100644 --- a/vendor/github.com/go-ini/ini/ini.go +++ b/vendor/github.com/go-ini/ini/ini.go @@ -16,51 +16,40 @@ package ini import ( + "bufio" "bytes" "errors" "fmt" "io" - "io/ioutil" "os" "regexp" "runtime" + "strconv" "strings" "sync" + "time" ) const ( - // Name for default section. 
You can use this constant or the string literal. - // In most of cases, an empty string is all you need to access the section. DEFAULT_SECTION = "DEFAULT" - // Maximum allowed depth when recursively substituing variable names. _DEPTH_VALUES = 99 - _VERSION = "1.28.2" + + _VERSION = "1.7.0" ) -// Version returns current package version literal. func Version() string { return _VERSION } var ( - // Delimiter to determine or compose a new line. - // This variable will be changed to "\r\n" automatically on Windows - // at package init time. LineBreak = "\n" // Variable regexp pattern: %(variable)s varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) - // Indicate whether to align "=" sign with spaces to produce pretty output - // or reduce all possible spaces for compact format. + // Write spaces around "=" to look better. PrettyFormat = true - - // Explicitly write DEFAULT section header - DefaultHeader = false - - // Indicate whether to put a line between sections - PrettySection = true ) func init() { @@ -78,12 +67,11 @@ func inSlice(str string, s []string) bool { return false } -// dataSource is an interface that returns object which can be read and closed. +// dataSource is a interface that returns file content. type dataSource interface { ReadCloser() (io.ReadCloser, error) } -// sourceFile represents an object that contains content on the local file system. type sourceFile struct { name string } @@ -104,24 +92,618 @@ func (rc *bytesReadCloser) Close() error { return nil } -// sourceData represents an object that contains content in memory. type sourceData struct { data []byte } func (s *sourceData) ReadCloser() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewReader(s.data)), nil + return &bytesReadCloser{bytes.NewReader(s.data)}, nil +} + +// ____ __. +// | |/ _|____ ___.__. +// | <_/ __ < | | +// | | \ ___/\___ | +// |____|__ \___ > ____| +// \/ \/\/ + +// Key represents a key under a section. +type Key struct { + s *Section + Comment string + name string + value string + isAutoIncr bool +} + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// String returns string representation of value. +func (k *Key) String() string { + val := k.value + if strings.Index(val, "%") == -1 { + return val + } + + for i := 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := strings.TrimLeft(vr, "%(") + noption = strings.TrimRight(noption, ")s") + + // Search in the same section. + nk, err := k.s.GetKey(noption) + if err != nil { + // Search again in default section. + nk, _ = k.s.f.Section("").GetKey(noption) + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off. +// Any other value returns an error. 
+func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + return strconv.Atoi(k.String()) +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 10, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 10, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 10, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. 
+func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. 
+func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string devide by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// Float64s returns list of float64 devide by given delimiter. +func (k *Key) Float64s(delim string) []float64 { + strs := k.Strings(delim) + vals := make([]float64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseFloat(strs[i], 64) + } + return vals +} + +// Ints returns list of int devide by given delimiter. +func (k *Key) Ints(delim string) []int { + strs := k.Strings(delim) + vals := make([]int, len(strs)) + for i := range strs { + vals[i], _ = strconv.Atoi(strs[i]) + } + return vals +} + +// Int64s returns list of int64 devide by given delimiter. +func (k *Key) Int64s(delim string) []int64 { + strs := k.Strings(delim) + vals := make([]int64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseInt(strs[i], 10, 64) + } + return vals +} + +// Uints returns list of uint devide by given delimiter. +func (k *Key) Uints(delim string) []uint { + strs := k.Strings(delim) + vals := make([]uint, len(strs)) + for i := range strs { + u, _ := strconv.ParseUint(strs[i], 10, 64) + vals[i] = uint(u) + } + return vals +} + +// Uint64s returns list of uint64 devide by given delimiter. +func (k *Key) Uint64s(delim string) []uint64 { + strs := k.Strings(delim) + vals := make([]uint64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseUint(strs[i], 10, 64) + } + return vals +} + +// TimesFormat parses with given format and returns list of time.Time devide by given delimiter. +func (k *Key) TimesFormat(format, delim string) []time.Time { + strs := k.Strings(delim) + vals := make([]time.Time, len(strs)) + for i := range strs { + vals[i], _ = time.Parse(format, strs[i]) + } + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time devide by given delimiter. +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// SetValue changes key value. 
+func (k *Key) SetValue(v string) { + k.value = v +} + +// _________ __ .__ +// / _____/ ____ _____/ |_|__| ____ ____ +// \_____ \_/ __ \_/ ___\ __\ |/ _ \ / \ +// / \ ___/\ \___| | | ( <_> ) | \ +// /_______ /\___ >\___ >__| |__|\____/|___| / +// \/ \/ \/ \/ + +// Section represents a config section. +type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string +} + +func newSection(f *File, name string) *Section { + return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + s.keys[name].value = val + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = &Key{s, "", name, val, false} + s.keysHash[name] = val + return s.keys[name], nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) Haskey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// HasKey returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list } -// sourceReadCloser represents an input stream with Close method. -type sourceReadCloser struct { - reader io.ReadCloser +// KeysHash returns keys hash consisting of names and values. 
+func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash } -func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { - return s.reader, nil +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } } +// ___________.__.__ +// \_ _____/|__| | ____ +// | __) | | | _/ __ \ +// | \ | | |_\ ___/ +// \___ / |__|____/\___ > +// \/ \/ + // File represents a combination of a or more INI file(s) in memory. type File struct { // Should make things safe, but sometimes doesn't matter. @@ -137,20 +719,16 @@ type File struct { // To keep data in order. sectionList []string - options LoadOptions - NameMapper - ValueMapper } // newFile initializes File object with given data sources. -func newFile(dataSources []dataSource, opts LoadOptions) *File { +func newFile(dataSources []dataSource) *File { return &File{ BlockMode: true, dataSources: dataSources, sections: make(map[string]*Section), sectionList: make([]string, 0, 10), - options: opts, } } @@ -160,33 +738,14 @@ func parseDataSource(source interface{}) (dataSource, error) { return sourceFile{s}, nil case []byte: return &sourceData{s}, nil - case io.ReadCloser: - return &sourceReadCloser{s}, nil default: return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) } } -type LoadOptions struct { - // Loose indicates whether the parser should ignore nonexistent files or return error. - Loose bool - // Insensitive indicates whether the parser forces all section and key names to lowercase. - Insensitive bool - // IgnoreContinuation indicates whether to ignore continuation lines while parsing. - IgnoreContinuation bool - // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. - IgnoreInlineComment bool - // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. - // This type of keys are mostly used in my.cnf. - AllowBooleanKeys bool - // AllowShadows indicates whether to keep track of keys with same name under same section. - AllowShadows bool - // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise - // conform to key/value pairs. Specify the names of those blocks here. - UnparseableSections []string -} - -func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +func Load(source interface{}, others ...interface{}) (_ *File, err error) { sources := make([]dataSource, len(others)+1) sources[0], err = parseDataSource(source) if err != nil { @@ -198,36 +757,8 @@ func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ return nil, err } } - f := newFile(sources, opts) - if err = f.Reload(); err != nil { - return nil, err - } - return f, nil -} - -// Load loads and parses from INI data sources. -// Arguments can be mixed of file name with string type, or raw data in []byte. -// It will return error if list contains nonexistent files. 
-func Load(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{}, source, others...) -} - -// LooseLoad has exactly same functionality as Load function -// except it ignores nonexistent files instead of returning error. -func LooseLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Loose: true}, source, others...) -} - -// InsensitiveLoad has exactly same functionality as Load function -// except it forces all section and key names to be lowercased. -func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Insensitive: true}, source, others...) -} - -// InsensitiveLoad has exactly same functionality as Load function -// except it allows have shadow keys. -func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{AllowShadows: true}, source, others...) + f := newFile(sources) + return f, f.Reload() } // Empty returns an empty file object. @@ -241,8 +772,6 @@ func Empty() *File { func (f *File) NewSection(name string) (*Section, error) { if len(name) == 0 { return nil, errors.New("error creating new section: empty section name") - } else if f.options.Insensitive && name != DEFAULT_SECTION { - name = strings.ToLower(name) } if f.BlockMode { @@ -259,18 +788,6 @@ func (f *File) NewSection(name string) (*Section, error) { return f.sections[name], nil } -// NewRawSection creates a new section with an unparseable body. -func (f *File) NewRawSection(name, body string) (*Section, error) { - section, err := f.NewSection(name) - if err != nil { - return nil, err - } - - section.isRawSection = true - section.rawBody = body - return section, nil -} - // NewSections creates a list of sections. func (f *File) NewSections(names ...string) (err error) { for _, name := range names { @@ -285,8 +802,6 @@ func (f *File) NewSections(names ...string) (err error) { func (f *File) GetSection(name string) (*Section, error) { if len(name) == 0 { name = DEFAULT_SECTION - } else if f.options.Insensitive { - name = strings.ToLower(name) } if f.BlockMode { @@ -296,7 +811,7 @@ func (f *File) GetSection(name string) (*Section, error) { sec := f.sections[name] if sec == nil { - return nil, fmt.Errorf("section '%s' does not exist", name) + return nil, fmt.Errorf("error when getting section: section '%s' not exists", name) } return sec, nil } @@ -322,11 +837,6 @@ func (f *File) Sections() []*Section { return sections } -// ChildSections returns a list of child sections of given section name. -func (f *File) ChildSections(name string) []*Section { - return f.Section(name).ChildSections() -} - // SectionStrings returns list of section names. 
func (f *File) SectionStrings() []string { list := make([]string, len(f.sectionList)) @@ -354,6 +864,240 @@ func (f *File) DeleteSection(name string) { } } +func cutComment(str string) string { + i := strings.Index(str, "#") + if i == -1 { + return str + } + return str[:i] +} + +func checkMultipleLines(buf *bufio.Reader, line, val, valQuote string) (string, error) { + isEnd := false + for { + next, err := buf.ReadString('\n') + if err != nil { + if err != io.EOF { + return "", err + } + isEnd = true + } + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + break + } + val += next + if isEnd { + return "", fmt.Errorf("error parsing line: missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func checkContinuationLines(buf *bufio.Reader, val string) (string, bool, error) { + isEnd := false + for { + valLen := len(val) + if valLen == 0 || val[valLen-1] != '\\' { + break + } + val = val[:valLen-1] + + next, err := buf.ReadString('\n') + if err != nil { + if err != io.EOF { + return "", isEnd, err + } + isEnd = true + } + + next = strings.TrimSpace(next) + if len(next) == 0 { + break + } + val += next + } + return val, isEnd, nil +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) error { + buf := bufio.NewReader(reader) + + // Handle BOM-UTF8. + // http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding + mask, err := buf.Peek(3) + if err == nil && len(mask) >= 3 && mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { + buf.Read(mask) + } + + count := 1 + comments := "" + isEnd := false + + section, err := f.NewSection(DEFAULT_SECTION) + if err != nil { + return err + } + + for { + line, err := buf.ReadString('\n') + line = strings.TrimSpace(line) + length := len(line) + + // Check error and ignore io.EOF just for a moment. + if err != nil { + if err != io.EOF { + return fmt.Errorf("error reading next line: %v", err) + } + // The last line of file could be an empty line. + if length == 0 { + break + } + isEnd = true + } + + // Skip empty lines. + if length == 0 { + continue + } + + switch { + case line[0] == '#' || line[0] == ';': // Comments. + if len(comments) == 0 { + comments = line + } else { + comments += LineBreak + line + } + continue + case line[0] == '[' && line[length-1] == ']': // New sction. + section, err = f.NewSection(strings.TrimSpace(line[1 : length-1])) + if err != nil { + return err + } + + if len(comments) > 0 { + section.Comment = comments + comments = "" + } + // Reset counter. + count = 1 + continue + } + + // Other possibilities. + var ( + i int + keyQuote string + kname string + valQuote string + val string + ) + + // Key name surrounded by quotes. + if line[0] == '"' { + if length > 6 && line[0:3] == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + if len(keyQuote) > 0 { + qLen := len(keyQuote) + pos := strings.Index(line[qLen:], keyQuote) + if pos == -1 { + return fmt.Errorf("error parsing line: missing closing key quote: %s", line) + } + pos = pos + qLen + i = strings.IndexAny(line[pos:], "=:") + if i < 0 { + return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) + } else if i == pos { + return fmt.Errorf("error parsing line: key is empty: %s", line) + } + i = i + pos + kname = line[qLen:pos] // Just keep spaces inside quotes. 
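For illustration only, a minimal sketch (assuming the vendored package is imported as "github.com/go-ini/ini"; the sample keys mirror the test data later in this patch) of the key-name quoting handled by the branch above: a name wrapped in ` or """ may itself contain the "=" and ":" delimiter characters, and ":" is accepted as a delimiter alongside "=".

package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	// Backtick- and triple-quoted key names keep their embedded "=" signs.
	cfg, err := ini.Load([]byte("`1+1=2` = true\n\"\"\"6+1=7\"\"\" = true\nplain : value\n"))
	if err != nil {
		panic(err)
	}
	sec := cfg.Section("")
	fmt.Println(sec.Key("1+1=2").String()) // true
	fmt.Println(sec.Key("6+1=7").String()) // true
	fmt.Println(sec.Key("plain").String()) // value (":" also works as the delimiter)
}
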
+ } else { + i = strings.IndexAny(line, "=:") + if i < 0 { + return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) + } else if i == 0 { + return fmt.Errorf("error parsing line: key is empty: %s", line) + } + kname = strings.TrimSpace(line[0:i]) + } + + isAutoIncr := false + // Auto increment. + if kname == "-" { + isAutoIncr = true + kname = "#" + fmt.Sprint(count) + count++ + } + + lineRight := strings.TrimSpace(line[i+1:]) + lineRightLength := len(lineRight) + firstChar := "" + if lineRightLength >= 2 { + firstChar = lineRight[0:1] + } + if firstChar == "`" { + valQuote = "`" + } else if firstChar == `"` { + if lineRightLength >= 3 && lineRight[0:3] == `"""` { + valQuote = `"""` + } else { + valQuote = `"` + } + } else if firstChar == `'` { + valQuote = `'` + } + + if len(valQuote) > 0 { + qLen := len(valQuote) + pos := strings.LastIndex(lineRight[qLen:], valQuote) + // For multiple-line value check. + if pos == -1 { + if valQuote == `"` || valQuote == `'` { + return fmt.Errorf("error parsing line: single quote does not allow multiple-line value: %s", line) + } + + val = lineRight[qLen:] + "\n" + val, err = checkMultipleLines(buf, line, val, valQuote) + if err != nil { + return err + } + } else { + val = lineRight[qLen : pos+qLen] + } + } else { + val = strings.TrimSpace(cutComment(lineRight)) + val, isEnd, err = checkContinuationLines(buf, val) + if err != nil { + return err + } + } + + k, err := section.NewKey(kname, val) + if err != nil { + return err + } + k.isAutoIncr = isAutoIncr + if len(comments) > 0 { + k.Comment = comments + comments = "" + } + + if isEnd { + break + } + } + return nil +} + func (f *File) reload(s dataSource) error { r, err := s.ReadCloser() if err != nil { @@ -368,11 +1112,6 @@ func (f *File) reload(s dataSource) error { func (f *File) Reload() (err error) { for _, s := range f.dataSources { if err = f.reload(s); err != nil { - // In loose mode, we create an empty default section for nonexistent files. - if os.IsNotExist(err) && f.options.Loose { - f.parse(bytes.NewBuffer(nil)) - continue - } return err } } @@ -396,7 +1135,8 @@ func (f *File) Append(source interface{}, others ...interface{}) error { return f.Reload() } -func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { +// WriteToIndent writes file content into io.Writer with given value indention. +func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { equalSign := "=" if PrettyFormat { equalSign = " = " @@ -410,51 +1150,22 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { if sec.Comment[0] != '#' && sec.Comment[0] != ';' { sec.Comment = "; " + sec.Comment } - if _, err := buf.WriteString(sec.Comment + LineBreak); err != nil { - return nil, err + if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { + return 0, err } } - if i > 0 || DefaultHeader { - if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { - return nil, err + if i > 0 { + if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return 0, err } } else { - // Write nothing if default section is empty + // Write nothing if default section is empty. if len(sec.keyList) == 0 { continue } } - if sec.isRawSection { - if _, err := buf.WriteString(sec.rawBody); err != nil { - return nil, err - } - continue - } - - // Count and generate alignment length and buffer spaces using the - // longest key. Keys may be modifed if they contain certain characters so - // we need to take that into account in our calculation. 
- alignLength := 0 - if PrettyFormat { - for _, kname := range sec.keyList { - keyLength := len(kname) - // First case will surround key by ` and second by """ - if strings.ContainsAny(kname, "\"=:") { - keyLength += 2 - } else if strings.Contains(kname, "`") { - keyLength += 6 - } - - if keyLength > alignLength { - alignLength = keyLength - } - } - } - alignSpaces := bytes.Repeat([]byte(" "), alignLength) - - KEY_LIST: for _, kname := range sec.keyList { key := sec.Key(kname) if len(key.Comment) > 0 { @@ -464,8 +1175,8 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { if key.Comment[0] != '#' && key.Comment[0] != ';' { key.Comment = "; " + key.Comment } - if _, err := buf.WriteString(key.Comment + LineBreak); err != nil { - return nil, err + if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { + return 0, err } } @@ -474,62 +1185,31 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { } switch { - case key.isAutoIncrement: + case key.isAutoIncr: kname = "-" - case strings.ContainsAny(kname, "\"=:"): - kname = "`" + kname + "`" - case strings.Contains(kname, "`"): + case strings.Contains(kname, "`") || strings.Contains(kname, `"`): kname = `"""` + kname + `"""` + case strings.Contains(kname, `=`) || strings.Contains(kname, `:`): + kname = "`" + kname + "`" } - for _, val := range key.ValueWithShadows() { - if _, err := buf.WriteString(kname); err != nil { - return nil, err - } - - if key.isBooleanType { - if kname != sec.keyList[len(sec.keyList)-1] { - buf.WriteString(LineBreak) - } - continue KEY_LIST - } - - // Write out alignment spaces before "=" sign - if PrettyFormat { - buf.Write(alignSpaces[:alignLength-len(kname)]) - } - - // In case key value contains "\n", "`", "\"", "#" or ";" - if strings.ContainsAny(val, "\n`") { - val = `"""` + val + `"""` - } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { - val = "`" + val + "`" - } - if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { - return nil, err - } + val := key.value + // In case key value contains "\n", "`" or "\"". + if strings.Contains(val, "\n") || strings.Contains(val, "`") || strings.Contains(val, `"`) || + strings.Contains(val, "#") { + val = `"""` + val + `"""` + } + if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil { + return 0, err } } - if PrettySection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } + // Put a line between sections. + if _, err = buf.WriteString(LineBreak); err != nil { + return 0, err } } - return buf, nil -} - -// WriteToIndent writes content into io.Writer with given indention. -// If PrettyFormat has been set to be true, -// it will align "=" sign with spaces under each section. -func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { - buf, err := f.writeToBuffer(indent) - if err != nil { - return 0, err - } return buf.WriteTo(w) } @@ -542,12 +1222,23 @@ func (f *File) WriteTo(w io.Writer) (int64, error) { func (f *File) SaveToIndent(filename, indent string) error { // Note: Because we are truncating with os.Create, // so it's safer to save to a temporary file location and rename afte done. - buf, err := f.writeToBuffer(indent) + tmpPath := filename + "." 
+ strconv.Itoa(time.Now().Nanosecond()) + ".tmp" + defer os.Remove(tmpPath) + + fw, err := os.Create(tmpPath) if err != nil { return err } - return ioutil.WriteFile(filename, buf.Bytes(), 0666) + if _, err = f.WriteToIndent(fw, indent); err != nil { + fw.Close() + return err + } + fw.Close() + + // Remove old file and rename the new one. + os.Remove(filename) + return os.Rename(tmpPath, filename) } // SaveTo writes content to file system. diff --git a/vendor/github.com/go-ini/ini/ini_test.go b/vendor/github.com/go-ini/ini/ini_test.go index b3dd217c643..50e414afbe8 100644 --- a/vendor/github.com/go-ini/ini/ini_test.go +++ b/vendor/github.com/go-ini/ini/ini_test.go @@ -15,8 +15,7 @@ package ini import ( - "bytes" - "io/ioutil" + "fmt" "strings" "testing" "time" @@ -32,19 +31,19 @@ func Test_Version(t *testing.T) { const _CONF_DATA = ` ; Package name -NAME = ini +NAME = ini ; Package version -VERSION = v1 +VERSION = v1 ; Package import path IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s # Information about package author # Bio can be written in multiple lines. [author] -NAME = Unknwon ; Succeeding comment +NAME = Unknwon # Succeeding comment E-MAIL = fake@localhost GITHUB = https://github.com/%(NAME)s -BIO = """Gopher. +BIO = """Gopher. Coding addict. Good man. """ # Succeeding comment @@ -61,42 +60,33 @@ UNUSED_KEY = should be deleted -: Support load multiple files to overwrite key values [types] -STRING = str -BOOL = true +STRING = str +BOOL = true BOOL_FALSE = false -FLOAT64 = 1.25 -INT = 10 -TIME = 2015-01-01T20:17:05Z -DURATION = 2h45m -UINT = 3 +FLOAT64 = 1.25 +INT = 10 +TIME = 2015-01-01T20:17:05Z +DURATION = 2h45m +UINT = 3 [array] -STRINGS = en, zh, de +STRINGS = en, zh, de FLOAT64S = 1.1, 2.2, 3.3 -INTS = 1, 2, 3 -UINTS = 1, 2, 3 -TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z +INTS = 1, 2, 3 +UINTS = 1, 2, 3 +TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z [note] empty_lines = next line is empty\ -; Comment before the section -[comments] ; This is a comment for the section too -; Comment before key -key = "value" -key2 = "value2" ; This is a comment for key2 -key3 = "one", "two", "three" - [advance] value with quotes = "some value" value quote2 again = 'some value' -includes comment sign = ` + "`" + "my#password" + "`" + ` -includes comment sign2 = ` + "`" + "my;password" + "`" + ` -true = 2+3=5 +true = """"2+3=5"""" "1+1=2" = true """6+1=7""" = true """` + "`" + `5+5` + "`" + `""" = 10 -` + "`" + `"6+6"` + "`" + ` = 12 +""""6+6"""" = 12 ` + "`" + `7-2=4` + "`" + ` = false ADDRESS = ` + "`" + `404 road, NotFound, State, 50000` + "`" + ` @@ -117,21 +107,9 @@ func Test_Load(t *testing.T) { }) Convey("Load with multiple data sources", func() { - cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini", ioutil.NopCloser(bytes.NewReader([]byte(_CONF_DATA)))) - So(err, ShouldBeNil) - So(cfg, ShouldNotBeNil) - - f, err := Load([]byte(_CONF_DATA), "testdata/404.ini") - So(err, ShouldNotBeNil) - So(f, ShouldBeNil) - }) - - Convey("Load with io.ReadCloser", func() { - cfg, err := Load(ioutil.NopCloser(bytes.NewReader([]byte(_CONF_DATA)))) + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") So(err, ShouldBeNil) So(cfg, ShouldNotBeNil) - - So(cfg.Section("").Key("NAME").String(), ShouldEqual, "ini") }) }) @@ -141,9 +119,8 @@ func Test_Load(t *testing.T) { _, err := Load(_CONF_DATA) So(err, ShouldNotBeNil) - f, err := Load("testdata/404.ini") + _, err = Load("testdata/404.ini") So(err, ShouldNotBeNil) - So(f, ShouldBeNil) _, err = Load(1) 
So(err, ShouldNotBeNil) @@ -152,12 +129,9 @@ func Test_Load(t *testing.T) { So(err, ShouldNotBeNil) }) - Convey("Load with bad section name", func() { + Convey("Load with empty section name", func() { _, err := Load([]byte("[]")) So(err, ShouldNotBeNil) - - _, err = Load([]byte("[")) - So(err, ShouldNotBeNil) }) Convey("Load with bad keys", func() { @@ -180,118 +154,310 @@ func Test_Load(t *testing.T) { Convey("Load with bad values", func() { _, err := Load([]byte(`name="""Unknwon`)) So(err, ShouldNotBeNil) + + _, err = Load([]byte(`key = "value`)) + So(err, ShouldNotBeNil) }) }) +} - Convey("Get section and key insensitively", t, func() { - cfg, err := InsensitiveLoad([]byte(_CONF_DATA), "testdata/conf.ini") +func Test_Values(t *testing.T) { + Convey("Test getting and setting values", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") So(err, ShouldBeNil) So(cfg, ShouldNotBeNil) - sec, err := cfg.GetSection("Author") - So(err, ShouldBeNil) - So(sec, ShouldNotBeNil) + Convey("Get values in default section", func() { + sec := cfg.Section("") + So(sec, ShouldNotBeNil) + So(sec.Key("NAME").Value(), ShouldEqual, "ini") + So(sec.Key("NAME").String(), ShouldEqual, "ini") + So(sec.Key("NAME").Validate(func(in string) string { + return in + }), ShouldEqual, "ini") + So(sec.Key("NAME").Comment, ShouldEqual, "; Package name") + So(sec.Key("IMPORT_PATH").String(), ShouldEqual, "gopkg.in/ini.v1") + }) - key, err := sec.GetKey("E-mail") - So(err, ShouldBeNil) - So(key, ShouldNotBeNil) - }) + Convey("Get values in non-default section", func() { + sec := cfg.Section("author") + So(sec, ShouldNotBeNil) + So(sec.Key("NAME").String(), ShouldEqual, "Unknwon") + So(sec.Key("GITHUB").String(), ShouldEqual, "https://github.com/Unknwon") - Convey("Load with ignoring continuation lines", t, func() { - cfg, err := LoadSources(LoadOptions{IgnoreContinuation: true}, []byte(`key1=a\b\ -key2=c\d\`)) - So(err, ShouldBeNil) - So(cfg, ShouldNotBeNil) + sec = cfg.Section("package") + So(sec, ShouldNotBeNil) + So(sec.Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + }) - So(cfg.Section("").Key("key1").String(), ShouldEqual, `a\b\`) - So(cfg.Section("").Key("key2").String(), ShouldEqual, `c\d\`) - }) + Convey("Get auto-increment key names", func() { + keys := cfg.Section("features").Keys() + for i, k := range keys { + So(k.Name(), ShouldEqual, fmt.Sprintf("#%d", i+1)) + } + }) - Convey("Load with ignoring inline comments", t, func() { - cfg, err := LoadSources(LoadOptions{IgnoreInlineComment: true}, []byte(`key1=value ;comment -key2=value #comment2`)) - So(err, ShouldBeNil) - So(cfg, ShouldNotBeNil) + Convey("Get overwrite value", func() { + So(cfg.Section("author").Key("E-MAIL").String(), ShouldEqual, "u@gogs.io") + }) - So(cfg.Section("").Key("key1").String(), ShouldEqual, `value ;comment`) - So(cfg.Section("").Key("key2").String(), ShouldEqual, `value #comment2`) + Convey("Get sections", func() { + sections := cfg.Sections() + for i, name := range []string{DEFAULT_SECTION, "author", "package", "package.sub", "features", "types", "array", "note", "advance"} { + So(sections[i].Name(), ShouldEqual, name) + } + }) - var buf bytes.Buffer - cfg.WriteTo(&buf) - So(buf.String(), ShouldEqual, `key1 = value ;comment -key2 = value #comment2 + Convey("Get parent section value", func() { + So(cfg.Section("package.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + }) -`) - }) + Convey("Get multiple line value", func() { + So(cfg.Section("author").Key("BIO").String(), 
ShouldEqual, "Gopher.\nCoding addict.\nGood man.\n") + }) - Convey("Load with boolean type keys", t, func() { - cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, []byte(`key1=hello -key2 -#key3 -key4 -key5`)) - So(err, ShouldBeNil) - So(cfg, ShouldNotBeNil) + Convey("Get values with type", func() { + sec := cfg.Section("types") + v1, err := sec.Key("BOOL").Bool() + So(err, ShouldBeNil) + So(v1, ShouldBeTrue) - So(strings.Join(cfg.Section("").KeyStrings(), ","), ShouldEqual, "key1,key2,key4,key5") - So(cfg.Section("").Key("key2").MustBool(false), ShouldBeTrue) - - var buf bytes.Buffer - cfg.WriteTo(&buf) - // there is always a trailing \n at the end of the section - So(buf.String(), ShouldEqual, `key1 = hello -key2 -#key3 -key4 -key5 -`) - }) -} + v1, err = sec.Key("BOOL_FALSE").Bool() + So(err, ShouldBeNil) + So(v1, ShouldBeFalse) -func Test_File_ChildSections(t *testing.T) { - Convey("Find child sections by parent name", t, func() { - cfg, err := Load([]byte(` -[node] + v2, err := sec.Key("FLOAT64").Float64() + So(err, ShouldBeNil) + So(v2, ShouldEqual, 1.25) -[node.biz1] + v3, err := sec.Key("INT").Int() + So(err, ShouldBeNil) + So(v3, ShouldEqual, 10) -[node.biz2] + v4, err := sec.Key("INT").Int64() + So(err, ShouldBeNil) + So(v4, ShouldEqual, 10) -[node.biz3] + v5, err := sec.Key("UINT").Uint() + So(err, ShouldBeNil) + So(v5, ShouldEqual, 3) -[node.bizN] -`)) - So(err, ShouldBeNil) - So(cfg, ShouldNotBeNil) + v6, err := sec.Key("UINT").Uint64() + So(err, ShouldBeNil) + So(v6, ShouldEqual, 3) - children := cfg.ChildSections("node") - names := make([]string, len(children)) - for i := range children { - names[i] = children[i].name - } - So(strings.Join(names, ","), ShouldEqual, "node.biz1,node.biz2,node.biz3,node.bizN") - }) -} + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + v7, err := sec.Key("TIME").Time() + So(err, ShouldBeNil) + So(v7.String(), ShouldEqual, t.String()) + + Convey("Must get values with type", func() { + So(sec.Key("STRING").MustString("404"), ShouldEqual, "str") + So(sec.Key("BOOL").MustBool(), ShouldBeTrue) + So(sec.Key("FLOAT64").MustFloat64(), ShouldEqual, 1.25) + So(sec.Key("INT").MustInt(), ShouldEqual, 10) + So(sec.Key("INT").MustInt64(), ShouldEqual, 10) + So(sec.Key("UINT").MustUint(), ShouldEqual, 3) + So(sec.Key("UINT").MustUint64(), ShouldEqual, 3) + So(sec.Key("TIME").MustTime().String(), ShouldEqual, t.String()) + + dur, err := time.ParseDuration("2h45m") + So(err, ShouldBeNil) + So(sec.Key("DURATION").MustDuration().Seconds(), ShouldEqual, dur.Seconds()) + + Convey("Must get values with default value", func() { + So(sec.Key("STRING_404").MustString("404"), ShouldEqual, "404") + So(sec.Key("BOOL_404").MustBool(true), ShouldBeTrue) + So(sec.Key("FLOAT64_404").MustFloat64(2.5), ShouldEqual, 2.5) + So(sec.Key("INT_404").MustInt(15), ShouldEqual, 15) + So(sec.Key("INT_404").MustInt64(15), ShouldEqual, 15) + So(sec.Key("UINT_404").MustUint(6), ShouldEqual, 6) + So(sec.Key("UINT_404").MustUint64(6), ShouldEqual, 6) + + t, err := time.Parse(time.RFC3339, "2014-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME_404").MustTime(t).String(), ShouldEqual, t.String()) + + So(sec.Key("DURATION_404").MustDuration(dur).Seconds(), ShouldEqual, dur.Seconds()) + }) + }) + }) + + Convey("Get value with candidates", func() { + sec := cfg.Section("types") + So(sec.Key("STRING").In("", []string{"str", "arr", "types"}), ShouldEqual, "str") + So(sec.Key("FLOAT64").InFloat64(0, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) 
+ So(sec.Key("INT").InInt(0, []int{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("INT").InInt64(0, []int64{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("UINT").InUint(0, []uint{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("UINT").InUint64(0, []uint64{3, 6, 9}), ShouldEqual, 3) -func Test_LooseLoad(t *testing.T) { - Convey("Loose load from data sources", t, func() { - Convey("Loose load mixed with nonexistent file", func() { - cfg, err := LooseLoad("testdata/404.ini") + zt, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") So(err, ShouldBeNil) - So(cfg, ShouldNotBeNil) - var fake struct { - Name string `ini:"name"` + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME").InTime(zt, []time.Time{t, time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) + + Convey("Get value with candidates and default value", func() { + So(sec.Key("STRING_404").In("str", []string{"str", "arr", "types"}), ShouldEqual, "str") + So(sec.Key("FLOAT64_404").InFloat64(1.25, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) + So(sec.Key("INT_404").InInt(10, []int{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("INT64_404").InInt64(10, []int64{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("UINT_404").InUint(3, []uint{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("UINT_404").InUint64(3, []uint64{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("TIME_404").InTime(t, []time.Time{time.Now(), time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) + }) + }) + + Convey("Get values in range", func() { + sec := cfg.Section("types") + So(sec.Key("FLOAT64").RangeFloat64(0, 1, 2), ShouldEqual, 1.25) + So(sec.Key("INT").RangeInt(0, 10, 20), ShouldEqual, 10) + So(sec.Key("INT").RangeInt64(0, 10, 20), ShouldEqual, 10) + + minT, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") + So(err, ShouldBeNil) + midT, err := time.Parse(time.RFC3339, "2013-01-01T01:00:00Z") + So(err, ShouldBeNil) + maxT, err := time.Parse(time.RFC3339, "9999-01-01T01:00:00Z") + So(err, ShouldBeNil) + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME").RangeTime(t, minT, maxT).String(), ShouldEqual, t.String()) + + Convey("Get value in range with default value", func() { + So(sec.Key("FLOAT64").RangeFloat64(5, 0, 1), ShouldEqual, 5) + So(sec.Key("INT").RangeInt(7, 0, 5), ShouldEqual, 7) + So(sec.Key("INT").RangeInt64(7, 0, 5), ShouldEqual, 7) + So(sec.Key("TIME").RangeTime(t, minT, midT).String(), ShouldEqual, t.String()) + }) + }) + + Convey("Get values into slice", func() { + sec := cfg.Section("array") + So(strings.Join(sec.Key("STRINGS").Strings(","), ","), ShouldEqual, "en,zh,de") + So(len(sec.Key("STRINGS_404").Strings(",")), ShouldEqual, 0) + + vals1 := sec.Key("FLOAT64S").Float64s(",") + for i, v := range []float64{1.1, 2.2, 3.3} { + So(vals1[i], ShouldEqual, v) + } + + vals2 := sec.Key("INTS").Ints(",") + for i, v := range []int{1, 2, 3} { + So(vals2[i], ShouldEqual, v) + } + + vals3 := sec.Key("INTS").Int64s(",") + for i, v := range []int64{1, 2, 3} { + So(vals3[i], ShouldEqual, v) + } + + vals4 := sec.Key("UINTS").Uints(",") + for i, v := range []uint{1, 2, 3} { + So(vals4[i], ShouldEqual, v) + } + + vals5 := sec.Key("UINTS").Uint64s(",") + for i, v := range []uint64{1, 2, 3} { + So(vals5[i], ShouldEqual, v) + } + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + vals6 := sec.Key("TIMES").Times(",") + for i, v := range []time.Time{t, t, t} { + So(vals6[i].String(), ShouldEqual, 
v.String()) } - So(cfg.MapTo(&fake), ShouldBeNil) + }) + + Convey("Get key hash", func() { + cfg.Section("").KeysHash() + }) + + Convey("Set key value", func() { + k := cfg.Section("author").Key("NAME") + k.SetValue("无闻") + So(k.String(), ShouldEqual, "无闻") + }) + + Convey("Get key strings", func() { + So(strings.Join(cfg.Section("types").KeyStrings(), ","), ShouldEqual, "STRING,BOOL,BOOL_FALSE,FLOAT64,INT,TIME,DURATION,UINT") + }) + + Convey("Delete a key", func() { + cfg.Section("package.sub").DeleteKey("UNUSED_KEY") + _, err := cfg.Section("package.sub").GetKey("UNUSED_KEY") + So(err, ShouldNotBeNil) + }) + + Convey("Has Key", func() { + sec := cfg.Section("package.sub") + haskey1 := sec.Haskey("UNUSED_KEY") + haskey2 := sec.Haskey("CLONE_URL") + haskey3 := sec.Haskey("CLONE_URL_NO") + So(haskey1, ShouldBeTrue) + So(haskey2, ShouldBeTrue) + So(haskey3, ShouldBeFalse) + }) + + Convey("Has Value", func() { + sec := cfg.Section("author") + hasvalue1 := sec.HasValue("Unknwon") + hasvalue2 := sec.HasValue("doc") + So(hasvalue1, ShouldBeTrue) + So(hasvalue2, ShouldBeFalse) + }) + + Convey("Get section strings", func() { + So(strings.Join(cfg.SectionStrings(), ","), ShouldEqual, "DEFAULT,author,package,package.sub,features,types,array,note,advance") + }) + + Convey("Delete a section", func() { + cfg.DeleteSection("") + So(cfg.SectionStrings()[0], ShouldNotEqual, DEFAULT_SECTION) + }) - cfg, err = LooseLoad([]byte("name=Unknwon"), "testdata/404.ini") + Convey("Create new sections", func() { + cfg.NewSections("test", "test2") + _, err := cfg.GetSection("test") + So(err, ShouldBeNil) + _, err = cfg.GetSection("test2") So(err, ShouldBeNil) - So(cfg.Section("").Key("name").String(), ShouldEqual, "Unknwon") - So(cfg.MapTo(&fake), ShouldBeNil) - So(fake.Name, ShouldEqual, "Unknwon") }) }) + Convey("Test getting and setting bad values", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Create new key with empty name", func() { + k, err := cfg.Section("").NewKey("", "") + So(err, ShouldNotBeNil) + So(k, ShouldBeNil) + }) + + Convey("Create new section with empty name", func() { + s, err := cfg.NewSection("") + So(err, ShouldNotBeNil) + So(s, ShouldBeNil) + }) + + Convey("Create new sections with empty name", func() { + So(cfg.NewSections(""), ShouldNotBeNil) + }) + + Convey("Get section that not exists", func() { + s, err := cfg.GetSection("404") + So(err, ShouldNotBeNil) + So(s, ShouldBeNil) + + s = cfg.Section("404") + So(s, ShouldNotBeNil) + }) + }) } func Test_File_Append(t *testing.T) { @@ -309,15 +475,7 @@ func Test_File_Append(t *testing.T) { }) } -func Test_File_WriteTo(t *testing.T) { - Convey("Write to somewhere", t, func() { - var buf bytes.Buffer - cfg := Empty() - cfg.WriteTo(&buf) - }) -} - -func Test_File_SaveTo_WriteTo(t *testing.T) { +func Test_File_SaveTo(t *testing.T) { Convey("Save file", t, func() { cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") So(err, ShouldBeNil) @@ -327,165 +485,46 @@ func Test_File_SaveTo_WriteTo(t *testing.T) { cfg.Section("author").Comment = `Information about package author # Bio can be written in multiple lines.` cfg.Section("advanced").Key("val w/ pound").SetValue("my#password") - cfg.Section("advanced").Key("longest key has a colon : yes/no").SetValue("yes") So(cfg.SaveTo("testdata/conf_out.ini"), ShouldBeNil) cfg.Section("author").Key("NAME").Comment = "This is author name" - So(cfg.SaveToIndent("testdata/conf_out.ini", "\t"), ShouldBeNil) - - var buf 
bytes.Buffer - _, err = cfg.WriteToIndent(&buf, "\t") - So(err, ShouldBeNil) - So(buf.String(), ShouldEqual, `; Package name -NAME = ini -; Package version -VERSION = v1 -; Package import path -IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s - -; Information about package author -# Bio can be written in multiple lines. -[author] - ; This is author name - NAME = Unknwon - E-MAIL = u@gogs.io - GITHUB = https://github.com/%(NAME)s - # Succeeding comment - BIO = """Gopher. -Coding addict. -Good man. -""" - -[package] - CLONE_URL = https://%(IMPORT_PATH)s - -[package.sub] - UNUSED_KEY = should be deleted - -[features] - - = Support read/write comments of keys and sections - - = Support auto-increment of key names - - = Support load multiple files to overwrite key values - -[types] - STRING = str - BOOL = true - BOOL_FALSE = false - FLOAT64 = 1.25 - INT = 10 - TIME = 2015-01-01T20:17:05Z - DURATION = 2h45m - UINT = 3 - -[array] - STRINGS = en, zh, de - FLOAT64S = 1.1, 2.2, 3.3 - INTS = 1, 2, 3 - UINTS = 1, 2, 3 - TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z - -[note] - empty_lines = next line is empty - -; Comment before the section -; This is a comment for the section too -[comments] - ; Comment before key - key = value - ; This is a comment for key2 - key2 = value2 - key3 = "one", "two", "three" - -[advance] - value with quotes = some value - value quote2 again = some value - includes comment sign = `+"`"+"my#password"+"`"+` - includes comment sign2 = `+"`"+"my;password"+"`"+` - true = 2+3=5 - `+"`"+`1+1=2`+"`"+` = true - `+"`"+`6+1=7`+"`"+` = true - """`+"`"+`5+5`+"`"+`""" = 10 - `+"`"+`"6+6"`+"`"+` = 12 - `+"`"+`7-2=4`+"`"+` = false - ADDRESS = """404 road, -NotFound, State, 50000""" - two_lines = how about continuation lines? - lots_of_lines = 1 2 3 4 - -[advanced] - val w/ pound = `+"`"+`my#password`+"`"+` - `+"`"+`longest key has a colon : yes/no`+"`"+` = yes - -`) }) } -func Test_File_WriteTo_SectionRaw(t *testing.T) { - Convey("Write a INI with a raw section", t, func() { - var buf bytes.Buffer - cfg, err := LoadSources( - LoadOptions{ - UnparseableSections: []string{"CORE_LESSON", "COMMENTS"}, - }, - "testdata/aicc.ini") - So(err, ShouldBeNil) - So(cfg, ShouldNotBeNil) - cfg.WriteToIndent(&buf, "\t") - So(buf.String(), ShouldEqual, `[Core] - Lesson_Location = 87 - Lesson_Status = C - Score = 3 - Time = 00:02:30 - -[CORE_LESSON] -my lesson state data – 1111111111111111111000000000000000001110000 -111111111111111111100000000000111000000000 – end my lesson state data -[COMMENTS] -<1> This slide has the fuel listed in the wrong units -`) - }) -} - -// Helpers for slice tests. 
-func float64sEqual(values []float64, expected ...float64) { - So(values, ShouldHaveLength, len(expected)) - for i, v := range expected { - So(values[i], ShouldEqual, v) - } -} - -func intsEqual(values []int, expected ...int) { - So(values, ShouldHaveLength, len(expected)) - for i, v := range expected { - So(values[i], ShouldEqual, v) +func Benchmark_Key_Value(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").Value() } } -func int64sEqual(values []int64, expected ...int64) { - So(values, ShouldHaveLength, len(expected)) - for i, v := range expected { - So(values[i], ShouldEqual, v) +func Benchmark_Key_String(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").String() } } -func uintsEqual(values []uint, expected ...uint) { - So(values, ShouldHaveLength, len(expected)) - for i, v := range expected { - So(values[i], ShouldEqual, v) +func Benchmark_Key_Value_NonBlock(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + c.BlockMode = false + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").Value() } } -func uint64sEqual(values []uint64, expected ...uint64) { - So(values, ShouldHaveLength, len(expected)) - for i, v := range expected { - So(values[i], ShouldEqual, v) +func Benchmark_Key_String_NonBlock(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + c.BlockMode = false + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").String() } } -func timesEqual(values []time.Time, expected ...time.Time) { - So(values, ShouldHaveLength, len(expected)) - for i, v := range expected { - So(values[i].String(), ShouldEqual, v.String()) +func Benchmark_Key_SetValue(b *testing.B) { + c, _ := Load([]byte(_CONF_DATA)) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").SetValue("10") } } diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go deleted file mode 100644 index 838356af01b..00000000000 --- a/vendor/github.com/go-ini/ini/key.go +++ /dev/null @@ -1,699 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "errors" - "fmt" - "strconv" - "strings" - "time" -) - -// Key represents a key under a section. -type Key struct { - s *Section - name string - value string - isAutoIncrement bool - isBooleanType bool - - isShadow bool - shadows []*Key - - Comment string -} - -// newKey simply return a key object with given values. -func newKey(s *Section, name, val string) *Key { - return &Key{ - s: s, - name: name, - value: val, - } -} - -func (k *Key) addShadow(val string) error { - if k.isShadow { - return errors.New("cannot add shadow to another shadow key") - } else if k.isAutoIncrement || k.isBooleanType { - return errors.New("cannot add shadow to auto-increment or boolean key") - } - - shadow := newKey(k.s, k.name, val) - shadow.isShadow = true - k.shadows = append(k.shadows, shadow) - return nil -} - -// AddShadow adds a new shadow key to itself. 
-func (k *Key) AddShadow(val string) error { - if !k.s.f.options.AllowShadows { - return errors.New("shadow key is not allowed") - } - return k.addShadow(val) -} - -// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv -type ValueMapper func(string) string - -// Name returns name of key. -func (k *Key) Name() string { - return k.name -} - -// Value returns raw value of key for performance purpose. -func (k *Key) Value() string { - return k.value -} - -// ValueWithShadows returns raw values of key and its shadows if any. -func (k *Key) ValueWithShadows() []string { - if len(k.shadows) == 0 { - return []string{k.value} - } - vals := make([]string, len(k.shadows)+1) - vals[0] = k.value - for i := range k.shadows { - vals[i+1] = k.shadows[i].value - } - return vals -} - -// transformValue takes a raw value and transforms to its final string. -func (k *Key) transformValue(val string) string { - if k.s.f.ValueMapper != nil { - val = k.s.f.ValueMapper(val) - } - - // Fail-fast if no indicate char found for recursive value - if !strings.Contains(val, "%") { - return val - } - for i := 0; i < _DEPTH_VALUES; i++ { - vr := varPattern.FindString(val) - if len(vr) == 0 { - break - } - - // Take off leading '%(' and trailing ')s'. - noption := strings.TrimLeft(vr, "%(") - noption = strings.TrimRight(noption, ")s") - - // Search in the same section. - nk, err := k.s.GetKey(noption) - if err != nil { - // Search again in default section. - nk, _ = k.s.f.Section("").GetKey(noption) - } - - // Substitute by new value and take off leading '%(' and trailing ')s'. - val = strings.Replace(val, vr, nk.value, -1) - } - return val -} - -// String returns string representation of value. -func (k *Key) String() string { - return k.transformValue(k.value) -} - -// Validate accepts a validate function which can -// return modifed result as key value. -func (k *Key) Validate(fn func(string) string) string { - return fn(k.String()) -} - -// parseBool returns the boolean value represented by the string. -// -// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, -// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. -// Any other value returns an error. -func parseBool(str string) (value bool, err error) { - switch str { - case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": - return true, nil - case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": - return false, nil - } - return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) -} - -// Bool returns bool type value. -func (k *Key) Bool() (bool, error) { - return parseBool(k.String()) -} - -// Float64 returns float64 type value. -func (k *Key) Float64() (float64, error) { - return strconv.ParseFloat(k.String(), 64) -} - -// Int returns int type value. -func (k *Key) Int() (int, error) { - return strconv.Atoi(k.String()) -} - -// Int64 returns int64 type value. -func (k *Key) Int64() (int64, error) { - return strconv.ParseInt(k.String(), 10, 64) -} - -// Uint returns uint type valued. -func (k *Key) Uint() (uint, error) { - u, e := strconv.ParseUint(k.String(), 10, 64) - return uint(u), e -} - -// Uint64 returns uint64 type value. -func (k *Key) Uint64() (uint64, error) { - return strconv.ParseUint(k.String(), 10, 64) -} - -// Duration returns time.Duration type value. -func (k *Key) Duration() (time.Duration, error) { - return time.ParseDuration(k.String()) -} - -// TimeFormat parses with given format and returns time.Time type value. 
-func (k *Key) TimeFormat(format string) (time.Time, error) { - return time.Parse(format, k.String()) -} - -// Time parses with RFC3339 format and returns time.Time type value. -func (k *Key) Time() (time.Time, error) { - return k.TimeFormat(time.RFC3339) -} - -// MustString returns default value if key value is empty. -func (k *Key) MustString(defaultVal string) string { - val := k.String() - if len(val) == 0 { - k.value = defaultVal - return defaultVal - } - return val -} - -// MustBool always returns value without error, -// it returns false if error occurs. -func (k *Key) MustBool(defaultVal ...bool) bool { - val, err := k.Bool() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatBool(defaultVal[0]) - return defaultVal[0] - } - return val -} - -// MustFloat64 always returns value without error, -// it returns 0.0 if error occurs. -func (k *Key) MustFloat64(defaultVal ...float64) float64 { - val, err := k.Float64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) - return defaultVal[0] - } - return val -} - -// MustInt always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt(defaultVal ...int) int { - val, err := k.Int() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(int64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustInt64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt64(defaultVal ...int64) int64 { - val, err := k.Int64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustUint always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint(defaultVal ...uint) uint { - val, err := k.Uint() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustUint64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint64(defaultVal ...uint64) uint64 { - val, err := k.Uint64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustDuration always returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { - val, err := k.Duration() - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].String() - return defaultVal[0] - } - return val -} - -// MustTimeFormat always parses with given format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { - val, err := k.TimeFormat(format) - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].Format(format) - return defaultVal[0] - } - return val -} - -// MustTime always parses with RFC3339 format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTime(defaultVal ...time.Time) time.Time { - return k.MustTimeFormat(time.RFC3339, defaultVal...) -} - -// In always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. 
-func (k *Key) In(defaultVal string, candidates []string) string { - val := k.String() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InFloat64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { - val := k.MustFloat64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt(defaultVal int, candidates []int) int { - val := k.MustInt() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { - val := k.MustInt64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint(defaultVal uint, candidates []uint) uint { - val := k.MustUint() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { - val := k.MustUint64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTimeFormat always parses with given format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { - val := k.MustTimeFormat(format) - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTime always parses with RFC3339 format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { - return k.InTimeFormat(time.RFC3339, defaultVal, candidates) -} - -// RangeFloat64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { - val := k.MustFloat64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt(defaultVal, min, max int) int { - val := k.MustInt() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { - val := k.MustInt64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeTimeFormat checks if value with given format is in given range inclusively, -// and returns default value if it's not. 
-func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { - val := k.MustTimeFormat(format) - if val.Unix() < min.Unix() || val.Unix() > max.Unix() { - return defaultVal - } - return val -} - -// RangeTime checks if value with RFC3339 format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { - return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) -} - -// Strings returns list of string divided by given delimiter. -func (k *Key) Strings(delim string) []string { - str := k.String() - if len(str) == 0 { - return []string{} - } - - vals := strings.Split(str, delim) - for i := range vals { - // vals[i] = k.transformValue(strings.TrimSpace(vals[i])) - vals[i] = strings.TrimSpace(vals[i]) - } - return vals -} - -// StringsWithShadows returns list of string divided by given delimiter. -// Shadows will also be appended if any. -func (k *Key) StringsWithShadows(delim string) []string { - vals := k.ValueWithShadows() - results := make([]string, 0, len(vals)*2) - for i := range vals { - if len(vals) == 0 { - continue - } - - results = append(results, strings.Split(vals[i], delim)...) - } - - for i := range results { - results[i] = k.transformValue(strings.TrimSpace(results[i])) - } - return results -} - -// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Float64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), true, false) - return vals -} - -// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Ints(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), true, false) - return vals -} - -// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Int64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), true, false) - return vals -} - -// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), true, false) - return vals -} - -// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), true, false) - return vals -} - -// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). -func (k *Key) TimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) - return vals -} - -// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). -func (k *Key) Times(delim string) []time.Time { - return k.TimesFormat(time.RFC3339, delim) -} - -// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then -// it will not be included to result list. -func (k *Key) ValidFloat64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), false, false) - return vals -} - -// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will -// not be included to result list. 
-func (k *Key) ValidInts(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), false, false) - return vals -} - -// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, -// then it will not be included to result list. -func (k *Key) ValidInt64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), false, false) - return vals -} - -// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, -// then it will not be included to result list. -func (k *Key) ValidUints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), false, false) - return vals -} - -// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned -// integer, then it will not be included to result list. -func (k *Key) ValidUint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), false, false) - return vals -} - -// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) - return vals -} - -// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimes(delim string) []time.Time { - return k.ValidTimesFormat(time.RFC3339, delim) -} - -// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictFloat64s(delim string) ([]float64, error) { - return k.parseFloat64s(k.Strings(delim), false, true) -} - -// StrictInts returns list of int divided by given delimiter or error on first invalid input. -func (k *Key) StrictInts(delim string) ([]int, error) { - return k.parseInts(k.Strings(delim), false, true) -} - -// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictInt64s(delim string) ([]int64, error) { - return k.parseInt64s(k.Strings(delim), false, true) -} - -// StrictUints returns list of uint divided by given delimiter or error on first invalid input. -func (k *Key) StrictUints(delim string) ([]uint, error) { - return k.parseUints(k.Strings(delim), false, true) -} - -// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictUint64s(delim string) ([]uint64, error) { - return k.parseUint64s(k.Strings(delim), false, true) -} - -// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { - return k.parseTimesFormat(format, k.Strings(delim), false, true) -} - -// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimes(delim string) ([]time.Time, error) { - return k.StrictTimesFormat(time.RFC3339, delim) -} - -// parseFloat64s transforms strings to float64s. -func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { - vals := make([]float64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseFloat(str, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseInts transforms strings to ints. 
-func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { - vals := make([]int, 0, len(strs)) - for _, str := range strs { - val, err := strconv.Atoi(str) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseInt64s transforms strings to int64s. -func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { - vals := make([]int64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseInt(str, 10, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseUints transforms strings to uints. -func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { - vals := make([]uint, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseUint(str, 10, 0) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, uint(val)) - } - } - return vals, nil -} - -// parseUint64s transforms strings to uint64s. -func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { - vals := make([]uint64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseUint(str, 10, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseTimesFormat transforms strings to times in given format. -func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { - vals := make([]time.Time, 0, len(strs)) - for _, str := range strs { - val, err := time.Parse(format, str) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// SetValue changes key value. -func (k *Key) SetValue(v string) { - if k.s.f.BlockMode { - k.s.f.lock.Lock() - defer k.s.f.lock.Unlock() - } - - k.value = v - k.s.keysHash[k.name] = v -} diff --git a/vendor/github.com/go-ini/ini/key_test.go b/vendor/github.com/go-ini/ini/key_test.go deleted file mode 100644 index 1281d5bf0c6..00000000000 --- a/vendor/github.com/go-ini/ini/key_test.go +++ /dev/null @@ -1,573 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "fmt" - "strings" - "testing" - "time" - - . 
"github.com/smartystreets/goconvey/convey" -) - -func Test_Key(t *testing.T) { - Convey("Test getting and setting values", t, func() { - cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") - So(err, ShouldBeNil) - So(cfg, ShouldNotBeNil) - - Convey("Get values in default section", func() { - sec := cfg.Section("") - So(sec, ShouldNotBeNil) - So(sec.Key("NAME").Value(), ShouldEqual, "ini") - So(sec.Key("NAME").String(), ShouldEqual, "ini") - So(sec.Key("NAME").Validate(func(in string) string { - return in - }), ShouldEqual, "ini") - So(sec.Key("NAME").Comment, ShouldEqual, "; Package name") - So(sec.Key("IMPORT_PATH").String(), ShouldEqual, "gopkg.in/ini.v1") - }) - - Convey("Get values in non-default section", func() { - sec := cfg.Section("author") - So(sec, ShouldNotBeNil) - So(sec.Key("NAME").String(), ShouldEqual, "Unknwon") - So(sec.Key("GITHUB").String(), ShouldEqual, "https://github.com/Unknwon") - - sec = cfg.Section("package") - So(sec, ShouldNotBeNil) - So(sec.Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") - }) - - Convey("Get auto-increment key names", func() { - keys := cfg.Section("features").Keys() - for i, k := range keys { - So(k.Name(), ShouldEqual, fmt.Sprintf("#%d", i+1)) - } - }) - - Convey("Get parent-keys that are available to the child section", func() { - parentKeys := cfg.Section("package.sub").ParentKeys() - for _, k := range parentKeys { - So(k.Name(), ShouldEqual, "CLONE_URL") - } - }) - - Convey("Get overwrite value", func() { - So(cfg.Section("author").Key("E-MAIL").String(), ShouldEqual, "u@gogs.io") - }) - - Convey("Get sections", func() { - sections := cfg.Sections() - for i, name := range []string{DEFAULT_SECTION, "author", "package", "package.sub", "features", "types", "array", "note", "comments", "advance"} { - So(sections[i].Name(), ShouldEqual, name) - } - }) - - Convey("Get parent section value", func() { - So(cfg.Section("package.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") - So(cfg.Section("package.fake.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") - }) - - Convey("Get multiple line value", func() { - So(cfg.Section("author").Key("BIO").String(), ShouldEqual, "Gopher.\nCoding addict.\nGood man.\n") - }) - - Convey("Get values with type", func() { - sec := cfg.Section("types") - v1, err := sec.Key("BOOL").Bool() - So(err, ShouldBeNil) - So(v1, ShouldBeTrue) - - v1, err = sec.Key("BOOL_FALSE").Bool() - So(err, ShouldBeNil) - So(v1, ShouldBeFalse) - - v2, err := sec.Key("FLOAT64").Float64() - So(err, ShouldBeNil) - So(v2, ShouldEqual, 1.25) - - v3, err := sec.Key("INT").Int() - So(err, ShouldBeNil) - So(v3, ShouldEqual, 10) - - v4, err := sec.Key("INT").Int64() - So(err, ShouldBeNil) - So(v4, ShouldEqual, 10) - - v5, err := sec.Key("UINT").Uint() - So(err, ShouldBeNil) - So(v5, ShouldEqual, 3) - - v6, err := sec.Key("UINT").Uint64() - So(err, ShouldBeNil) - So(v6, ShouldEqual, 3) - - t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") - So(err, ShouldBeNil) - v7, err := sec.Key("TIME").Time() - So(err, ShouldBeNil) - So(v7.String(), ShouldEqual, t.String()) - - Convey("Must get values with type", func() { - So(sec.Key("STRING").MustString("404"), ShouldEqual, "str") - So(sec.Key("BOOL").MustBool(), ShouldBeTrue) - So(sec.Key("FLOAT64").MustFloat64(), ShouldEqual, 1.25) - So(sec.Key("INT").MustInt(), ShouldEqual, 10) - So(sec.Key("INT").MustInt64(), ShouldEqual, 10) - So(sec.Key("UINT").MustUint(), ShouldEqual, 3) - So(sec.Key("UINT").MustUint64(), ShouldEqual, 3) - 
So(sec.Key("TIME").MustTime().String(), ShouldEqual, t.String()) - - dur, err := time.ParseDuration("2h45m") - So(err, ShouldBeNil) - So(sec.Key("DURATION").MustDuration().Seconds(), ShouldEqual, dur.Seconds()) - - Convey("Must get values with default value", func() { - So(sec.Key("STRING_404").MustString("404"), ShouldEqual, "404") - So(sec.Key("BOOL_404").MustBool(true), ShouldBeTrue) - So(sec.Key("FLOAT64_404").MustFloat64(2.5), ShouldEqual, 2.5) - So(sec.Key("INT_404").MustInt(15), ShouldEqual, 15) - So(sec.Key("INT64_404").MustInt64(15), ShouldEqual, 15) - So(sec.Key("UINT_404").MustUint(6), ShouldEqual, 6) - So(sec.Key("UINT64_404").MustUint64(6), ShouldEqual, 6) - - t, err := time.Parse(time.RFC3339, "2014-01-01T20:17:05Z") - So(err, ShouldBeNil) - So(sec.Key("TIME_404").MustTime(t).String(), ShouldEqual, t.String()) - - So(sec.Key("DURATION_404").MustDuration(dur).Seconds(), ShouldEqual, dur.Seconds()) - - Convey("Must should set default as key value", func() { - So(sec.Key("STRING_404").String(), ShouldEqual, "404") - So(sec.Key("BOOL_404").String(), ShouldEqual, "true") - So(sec.Key("FLOAT64_404").String(), ShouldEqual, "2.5") - So(sec.Key("INT_404").String(), ShouldEqual, "15") - So(sec.Key("INT64_404").String(), ShouldEqual, "15") - So(sec.Key("UINT_404").String(), ShouldEqual, "6") - So(sec.Key("UINT64_404").String(), ShouldEqual, "6") - So(sec.Key("TIME_404").String(), ShouldEqual, "2014-01-01T20:17:05Z") - So(sec.Key("DURATION_404").String(), ShouldEqual, "2h45m0s") - }) - }) - }) - }) - - Convey("Get value with candidates", func() { - sec := cfg.Section("types") - So(sec.Key("STRING").In("", []string{"str", "arr", "types"}), ShouldEqual, "str") - So(sec.Key("FLOAT64").InFloat64(0, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) - So(sec.Key("INT").InInt(0, []int{10, 20, 30}), ShouldEqual, 10) - So(sec.Key("INT").InInt64(0, []int64{10, 20, 30}), ShouldEqual, 10) - So(sec.Key("UINT").InUint(0, []uint{3, 6, 9}), ShouldEqual, 3) - So(sec.Key("UINT").InUint64(0, []uint64{3, 6, 9}), ShouldEqual, 3) - - zt, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") - So(err, ShouldBeNil) - t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") - So(err, ShouldBeNil) - So(sec.Key("TIME").InTime(zt, []time.Time{t, time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) - - Convey("Get value with candidates and default value", func() { - So(sec.Key("STRING_404").In("str", []string{"str", "arr", "types"}), ShouldEqual, "str") - So(sec.Key("FLOAT64_404").InFloat64(1.25, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) - So(sec.Key("INT_404").InInt(10, []int{10, 20, 30}), ShouldEqual, 10) - So(sec.Key("INT64_404").InInt64(10, []int64{10, 20, 30}), ShouldEqual, 10) - So(sec.Key("UINT_404").InUint(3, []uint{3, 6, 9}), ShouldEqual, 3) - So(sec.Key("UINT_404").InUint64(3, []uint64{3, 6, 9}), ShouldEqual, 3) - So(sec.Key("TIME_404").InTime(t, []time.Time{time.Now(), time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) - }) - }) - - Convey("Get values in range", func() { - sec := cfg.Section("types") - So(sec.Key("FLOAT64").RangeFloat64(0, 1, 2), ShouldEqual, 1.25) - So(sec.Key("INT").RangeInt(0, 10, 20), ShouldEqual, 10) - So(sec.Key("INT").RangeInt64(0, 10, 20), ShouldEqual, 10) - - minT, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") - So(err, ShouldBeNil) - midT, err := time.Parse(time.RFC3339, "2013-01-01T01:00:00Z") - So(err, ShouldBeNil) - maxT, err := time.Parse(time.RFC3339, "9999-01-01T01:00:00Z") - So(err, ShouldBeNil) - 
t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") - So(err, ShouldBeNil) - So(sec.Key("TIME").RangeTime(t, minT, maxT).String(), ShouldEqual, t.String()) - - Convey("Get value in range with default value", func() { - So(sec.Key("FLOAT64").RangeFloat64(5, 0, 1), ShouldEqual, 5) - So(sec.Key("INT").RangeInt(7, 0, 5), ShouldEqual, 7) - So(sec.Key("INT").RangeInt64(7, 0, 5), ShouldEqual, 7) - So(sec.Key("TIME").RangeTime(t, minT, midT).String(), ShouldEqual, t.String()) - }) - }) - - Convey("Get values into slice", func() { - sec := cfg.Section("array") - So(strings.Join(sec.Key("STRINGS").Strings(","), ","), ShouldEqual, "en,zh,de") - So(len(sec.Key("STRINGS_404").Strings(",")), ShouldEqual, 0) - - vals1 := sec.Key("FLOAT64S").Float64s(",") - float64sEqual(vals1, 1.1, 2.2, 3.3) - - vals2 := sec.Key("INTS").Ints(",") - intsEqual(vals2, 1, 2, 3) - - vals3 := sec.Key("INTS").Int64s(",") - int64sEqual(vals3, 1, 2, 3) - - vals4 := sec.Key("UINTS").Uints(",") - uintsEqual(vals4, 1, 2, 3) - - vals5 := sec.Key("UINTS").Uint64s(",") - uint64sEqual(vals5, 1, 2, 3) - - t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") - So(err, ShouldBeNil) - vals6 := sec.Key("TIMES").Times(",") - timesEqual(vals6, t, t, t) - }) - - Convey("Get valid values into slice", func() { - sec := cfg.Section("array") - vals1 := sec.Key("FLOAT64S").ValidFloat64s(",") - float64sEqual(vals1, 1.1, 2.2, 3.3) - - vals2 := sec.Key("INTS").ValidInts(",") - intsEqual(vals2, 1, 2, 3) - - vals3 := sec.Key("INTS").ValidInt64s(",") - int64sEqual(vals3, 1, 2, 3) - - vals4 := sec.Key("UINTS").ValidUints(",") - uintsEqual(vals4, 1, 2, 3) - - vals5 := sec.Key("UINTS").ValidUint64s(",") - uint64sEqual(vals5, 1, 2, 3) - - t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") - So(err, ShouldBeNil) - vals6 := sec.Key("TIMES").ValidTimes(",") - timesEqual(vals6, t, t, t) - }) - - Convey("Get values one type into slice of another type", func() { - sec := cfg.Section("array") - vals1 := sec.Key("STRINGS").ValidFloat64s(",") - So(vals1, ShouldBeEmpty) - - vals2 := sec.Key("STRINGS").ValidInts(",") - So(vals2, ShouldBeEmpty) - - vals3 := sec.Key("STRINGS").ValidInt64s(",") - So(vals3, ShouldBeEmpty) - - vals4 := sec.Key("STRINGS").ValidUints(",") - So(vals4, ShouldBeEmpty) - - vals5 := sec.Key("STRINGS").ValidUint64s(",") - So(vals5, ShouldBeEmpty) - - vals6 := sec.Key("STRINGS").ValidTimes(",") - So(vals6, ShouldBeEmpty) - }) - - Convey("Get valid values into slice without errors", func() { - sec := cfg.Section("array") - vals1, err := sec.Key("FLOAT64S").StrictFloat64s(",") - So(err, ShouldBeNil) - float64sEqual(vals1, 1.1, 2.2, 3.3) - - vals2, err := sec.Key("INTS").StrictInts(",") - So(err, ShouldBeNil) - intsEqual(vals2, 1, 2, 3) - - vals3, err := sec.Key("INTS").StrictInt64s(",") - So(err, ShouldBeNil) - int64sEqual(vals3, 1, 2, 3) - - vals4, err := sec.Key("UINTS").StrictUints(",") - So(err, ShouldBeNil) - uintsEqual(vals4, 1, 2, 3) - - vals5, err := sec.Key("UINTS").StrictUint64s(",") - So(err, ShouldBeNil) - uint64sEqual(vals5, 1, 2, 3) - - t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") - So(err, ShouldBeNil) - vals6, err := sec.Key("TIMES").StrictTimes(",") - So(err, ShouldBeNil) - timesEqual(vals6, t, t, t) - }) - - Convey("Get invalid values into slice", func() { - sec := cfg.Section("array") - vals1, err := sec.Key("STRINGS").StrictFloat64s(",") - So(vals1, ShouldBeEmpty) - So(err, ShouldNotBeNil) - - vals2, err := sec.Key("STRINGS").StrictInts(",") - So(vals2, ShouldBeEmpty) - So(err, ShouldNotBeNil) - - 
vals3, err := sec.Key("STRINGS").StrictInt64s(",") - So(vals3, ShouldBeEmpty) - So(err, ShouldNotBeNil) - - vals4, err := sec.Key("STRINGS").StrictUints(",") - So(vals4, ShouldBeEmpty) - So(err, ShouldNotBeNil) - - vals5, err := sec.Key("STRINGS").StrictUint64s(",") - So(vals5, ShouldBeEmpty) - So(err, ShouldNotBeNil) - - vals6, err := sec.Key("STRINGS").StrictTimes(",") - So(vals6, ShouldBeEmpty) - So(err, ShouldNotBeNil) - }) - - Convey("Get key hash", func() { - cfg.Section("").KeysHash() - }) - - Convey("Set key value", func() { - k := cfg.Section("author").Key("NAME") - k.SetValue("无闻") - So(k.String(), ShouldEqual, "无闻") - }) - - Convey("Get key strings", func() { - So(strings.Join(cfg.Section("types").KeyStrings(), ","), ShouldEqual, "STRING,BOOL,BOOL_FALSE,FLOAT64,INT,TIME,DURATION,UINT") - }) - - Convey("Delete a key", func() { - cfg.Section("package.sub").DeleteKey("UNUSED_KEY") - _, err := cfg.Section("package.sub").GetKey("UNUSED_KEY") - So(err, ShouldNotBeNil) - }) - - Convey("Has Key (backwards compatible)", func() { - sec := cfg.Section("package.sub") - haskey1 := sec.Haskey("UNUSED_KEY") - haskey2 := sec.Haskey("CLONE_URL") - haskey3 := sec.Haskey("CLONE_URL_NO") - So(haskey1, ShouldBeTrue) - So(haskey2, ShouldBeTrue) - So(haskey3, ShouldBeFalse) - }) - - Convey("Has Key", func() { - sec := cfg.Section("package.sub") - haskey1 := sec.HasKey("UNUSED_KEY") - haskey2 := sec.HasKey("CLONE_URL") - haskey3 := sec.HasKey("CLONE_URL_NO") - So(haskey1, ShouldBeTrue) - So(haskey2, ShouldBeTrue) - So(haskey3, ShouldBeFalse) - }) - - Convey("Has Value", func() { - sec := cfg.Section("author") - hasvalue1 := sec.HasValue("Unknwon") - hasvalue2 := sec.HasValue("doc") - So(hasvalue1, ShouldBeTrue) - So(hasvalue2, ShouldBeFalse) - }) - }) - - Convey("Test getting and setting bad values", t, func() { - cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") - So(err, ShouldBeNil) - So(cfg, ShouldNotBeNil) - - Convey("Create new key with empty name", func() { - k, err := cfg.Section("").NewKey("", "") - So(err, ShouldNotBeNil) - So(k, ShouldBeNil) - }) - - Convey("Create new section with empty name", func() { - s, err := cfg.NewSection("") - So(err, ShouldNotBeNil) - So(s, ShouldBeNil) - }) - - Convey("Create new sections with empty name", func() { - So(cfg.NewSections(""), ShouldNotBeNil) - }) - - Convey("Get section that not exists", func() { - s, err := cfg.GetSection("404") - So(err, ShouldNotBeNil) - So(s, ShouldBeNil) - - s = cfg.Section("404") - So(s, ShouldNotBeNil) - }) - }) - - Convey("Test key hash clone", t, func() { - cfg, err := Load([]byte(strings.Replace("network=tcp,addr=127.0.0.1:6379,db=4,pool_size=100,idle_timeout=180", ",", "\n", -1))) - So(err, ShouldBeNil) - for _, v := range cfg.Section("").KeysHash() { - So(len(v), ShouldBeGreaterThan, 0) - } - }) - - Convey("Key has empty value", t, func() { - _conf := `key1= -key2= ; comment` - cfg, err := Load([]byte(_conf)) - So(err, ShouldBeNil) - So(cfg.Section("").Key("key1").Value(), ShouldBeEmpty) - }) -} - -const _CONF_GIT_CONFIG = ` -[remote "origin"] - url = https://github.com/Antergone/test1.git - url = https://github.com/Antergone/test2.git -` - -func Test_Key_Shadows(t *testing.T) { - Convey("Shadows keys", t, func() { - Convey("Disable shadows", func() { - cfg, err := Load([]byte(_CONF_GIT_CONFIG)) - So(err, ShouldBeNil) - So(cfg.Section(`remote "origin"`).Key("url").String(), ShouldEqual, "https://github.com/Antergone/test2.git") - }) - - Convey("Enable shadows", func() { - cfg, err := 
ShadowLoad([]byte(_CONF_GIT_CONFIG)) - So(err, ShouldBeNil) - So(cfg.Section(`remote "origin"`).Key("url").String(), ShouldEqual, "https://github.com/Antergone/test1.git") - So(strings.Join(cfg.Section(`remote "origin"`).Key("url").ValueWithShadows(), " "), ShouldEqual, - "https://github.com/Antergone/test1.git https://github.com/Antergone/test2.git") - - Convey("Save with shadows", func() { - var buf bytes.Buffer - _, err := cfg.WriteTo(&buf) - So(err, ShouldBeNil) - So(buf.String(), ShouldEqual, `[remote "origin"] -url = https://github.com/Antergone/test1.git -url = https://github.com/Antergone/test2.git - -`) - }) - }) - }) -} - -func newTestFile(block bool) *File { - c, _ := Load([]byte(_CONF_DATA)) - c.BlockMode = block - return c -} - -func Benchmark_Key_Value(b *testing.B) { - c := newTestFile(true) - for i := 0; i < b.N; i++ { - c.Section("").Key("NAME").Value() - } -} - -func Benchmark_Key_Value_NonBlock(b *testing.B) { - c := newTestFile(false) - for i := 0; i < b.N; i++ { - c.Section("").Key("NAME").Value() - } -} - -func Benchmark_Key_Value_ViaSection(b *testing.B) { - c := newTestFile(true) - sec := c.Section("") - for i := 0; i < b.N; i++ { - sec.Key("NAME").Value() - } -} - -func Benchmark_Key_Value_ViaSection_NonBlock(b *testing.B) { - c := newTestFile(false) - sec := c.Section("") - for i := 0; i < b.N; i++ { - sec.Key("NAME").Value() - } -} - -func Benchmark_Key_Value_Direct(b *testing.B) { - c := newTestFile(true) - key := c.Section("").Key("NAME") - for i := 0; i < b.N; i++ { - key.Value() - } -} - -func Benchmark_Key_Value_Direct_NonBlock(b *testing.B) { - c := newTestFile(false) - key := c.Section("").Key("NAME") - for i := 0; i < b.N; i++ { - key.Value() - } -} - -func Benchmark_Key_String(b *testing.B) { - c := newTestFile(true) - for i := 0; i < b.N; i++ { - _ = c.Section("").Key("NAME").String() - } -} - -func Benchmark_Key_String_NonBlock(b *testing.B) { - c := newTestFile(false) - for i := 0; i < b.N; i++ { - _ = c.Section("").Key("NAME").String() - } -} - -func Benchmark_Key_String_ViaSection(b *testing.B) { - c := newTestFile(true) - sec := c.Section("") - for i := 0; i < b.N; i++ { - _ = sec.Key("NAME").String() - } -} - -func Benchmark_Key_String_ViaSection_NonBlock(b *testing.B) { - c := newTestFile(false) - sec := c.Section("") - for i := 0; i < b.N; i++ { - _ = sec.Key("NAME").String() - } -} - -func Benchmark_Key_SetValue(b *testing.B) { - c := newTestFile(true) - for i := 0; i < b.N; i++ { - c.Section("").Key("NAME").SetValue("10") - } -} - -func Benchmark_Key_SetValue_VisSection(b *testing.B) { - c := newTestFile(true) - sec := c.Section("") - for i := 0; i < b.N; i++ { - sec.Key("NAME").SetValue("10") - } -} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go deleted file mode 100644 index 69d5476273b..00000000000 --- a/vendor/github.com/go-ini/ini/parser.go +++ /dev/null @@ -1,361 +0,0 @@ -// Copyright 2015 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package ini - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - "unicode" -) - -type tokenType int - -const ( - _TOKEN_INVALID tokenType = iota - _TOKEN_COMMENT - _TOKEN_SECTION - _TOKEN_KEY -) - -type parser struct { - buf *bufio.Reader - isEOF bool - count int - comment *bytes.Buffer -} - -func newParser(r io.Reader) *parser { - return &parser{ - buf: bufio.NewReader(r), - count: 1, - comment: &bytes.Buffer{}, - } -} - -// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. -// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding -func (p *parser) BOM() error { - mask, err := p.buf.Peek(2) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 2 { - return nil - } - - switch { - case mask[0] == 254 && mask[1] == 255: - fallthrough - case mask[0] == 255 && mask[1] == 254: - p.buf.Read(mask) - case mask[0] == 239 && mask[1] == 187: - mask, err := p.buf.Peek(3) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 3 { - return nil - } - if mask[2] == 191 { - p.buf.Read(mask) - } - } - return nil -} - -func (p *parser) readUntil(delim byte) ([]byte, error) { - data, err := p.buf.ReadBytes(delim) - if err != nil { - if err == io.EOF { - p.isEOF = true - } else { - return nil, err - } - } - return data, nil -} - -func cleanComment(in []byte) ([]byte, bool) { - i := bytes.IndexAny(in, "#;") - if i == -1 { - return nil, false - } - return in[i:], true -} - -func readKeyName(in []byte) (string, int, error) { - line := string(in) - - // Check if key name surrounded by quotes. - var keyQuote string - if line[0] == '"' { - if len(line) > 6 && string(line[0:3]) == `"""` { - keyQuote = `"""` - } else { - keyQuote = `"` - } - } else if line[0] == '`' { - keyQuote = "`" - } - - // Get out key name - endIdx := -1 - if len(keyQuote) > 0 { - startIdx := len(keyQuote) - // FIXME: fail case -> """"""name"""=value - pos := strings.Index(line[startIdx:], keyQuote) - if pos == -1 { - return "", -1, fmt.Errorf("missing closing key quote: %s", line) - } - pos += startIdx - - // Find key-value delimiter - i := strings.IndexAny(line[pos+startIdx:], "=:") - if i < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - endIdx = pos + i - return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil - } - - endIdx = strings.IndexAny(line, "=:") - if endIdx < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil -} - -func (p *parser) readMultilines(line, val, valQuote string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := string(data) - - pos := strings.LastIndex(next, valQuote) - if pos > -1 { - val += next[:pos] - - comment, has := cleanComment([]byte(next[pos:])) - if has { - p.comment.Write(bytes.TrimSpace(comment)) - } - break - } - val += next - if p.isEOF { - return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) - } - } - return val, nil -} - -func (p *parser) readContinuationLines(val string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := strings.TrimSpace(string(data)) - - if len(next) == 0 { - break - } - val += next - if val[len(val)-1] != '\\' { - break - } - val = val[:len(val)-1] - } - return val, nil -} - -// hasSurroundedQuote check if and only if the first and last characters -// are quotes \" or \'. -// It returns false if any other parts also contain same kind of quotes. 
-func hasSurroundedQuote(in string, quote byte) bool { - return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && - strings.IndexByte(in[1:], quote) == len(in)-2 -} - -func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bool) (string, error) { - line := strings.TrimLeftFunc(string(in), unicode.IsSpace) - if len(line) == 0 { - return "", nil - } - - var valQuote string - if len(line) > 3 && string(line[0:3]) == `"""` { - valQuote = `"""` - } else if line[0] == '`' { - valQuote = "`" - } - - if len(valQuote) > 0 { - startIdx := len(valQuote) - pos := strings.LastIndex(line[startIdx:], valQuote) - // Check for multi-line value - if pos == -1 { - return p.readMultilines(line, line[startIdx:], valQuote) - } - - return line[startIdx : pos+startIdx], nil - } - - // Won't be able to reach here if value only contains whitespace - line = strings.TrimSpace(line) - - // Check continuation lines when desired - if !ignoreContinuation && line[len(line)-1] == '\\' { - return p.readContinuationLines(line[:len(line)-1]) - } - - // Check if ignore inline comment - if !ignoreInlineComment { - i := strings.IndexAny(line, "#;") - if i > -1 { - p.comment.WriteString(line[i:]) - line = strings.TrimSpace(line[:i]) - } - } - - // Trim single quotes - if hasSurroundedQuote(line, '\'') || - hasSurroundedQuote(line, '"') { - line = line[1 : len(line)-1] - } - return line, nil -} - -// parse parses data through an io.Reader. -func (f *File) parse(reader io.Reader) (err error) { - p := newParser(reader) - if err = p.BOM(); err != nil { - return fmt.Errorf("BOM: %v", err) - } - - // Ignore error because default section name is never empty string. - section, _ := f.NewSection(DEFAULT_SECTION) - - var line []byte - var inUnparseableSection bool - for !p.isEOF { - line, err = p.readUntil('\n') - if err != nil { - return err - } - - line = bytes.TrimLeftFunc(line, unicode.IsSpace) - if len(line) == 0 { - continue - } - - // Comments - if line[0] == '#' || line[0] == ';' { - // Note: we do not care ending line break, - // it is needed for adding second line, - // so just clean it once at the end when set to value. - p.comment.Write(line) - continue - } - - // Section - if line[0] == '[' { - // Read to the next ']' (TODO: support quoted strings) - // TODO(unknwon): use LastIndexByte when stop supporting Go1.4 - closeIdx := bytes.LastIndex(line, []byte("]")) - if closeIdx == -1 { - return fmt.Errorf("unclosed section: %s", line) - } - - name := string(line[1:closeIdx]) - section, err = f.NewSection(name) - if err != nil { - return err - } - - comment, has := cleanComment(line[closeIdx+1:]) - if has { - p.comment.Write(comment) - } - - section.Comment = strings.TrimSpace(p.comment.String()) - - // Reset aotu-counter and comments - p.comment.Reset() - p.count = 1 - - inUnparseableSection = false - for i := range f.options.UnparseableSections { - if f.options.UnparseableSections[i] == name || - (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) { - inUnparseableSection = true - continue - } - } - continue - } - - if inUnparseableSection { - section.isRawSection = true - section.rawBody += string(line) - continue - } - - kname, offset, err := readKeyName(line) - if err != nil { - // Treat as boolean key when desired, and whole line is key name. 
- if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys { - kname, err := p.readValue(line, f.options.IgnoreContinuation, f.options.IgnoreInlineComment) - if err != nil { - return err - } - key, err := section.NewBooleanKey(kname) - if err != nil { - return err - } - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - continue - } - return err - } - - // Auto increment. - isAutoIncr := false - if kname == "-" { - isAutoIncr = true - kname = "#" + strconv.Itoa(p.count) - p.count++ - } - - value, err := p.readValue(line[offset:], f.options.IgnoreContinuation, f.options.IgnoreInlineComment) - if err != nil { - return err - } - - key, err := section.NewKey(kname, value) - if err != nil { - return err - } - key.isAutoIncrement = isAutoIncr - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - } - return nil -} diff --git a/vendor/github.com/go-ini/ini/parser_test.go b/vendor/github.com/go-ini/ini/parser_test.go deleted file mode 100644 index 05258195be7..00000000000 --- a/vendor/github.com/go-ini/ini/parser_test.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2016 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "testing" - - . "github.com/smartystreets/goconvey/convey" -) - -func Test_BOM(t *testing.T) { - Convey("Test handling BOM", t, func() { - Convey("UTF-8-BOM", func() { - cfg, err := Load("testdata/UTF-8-BOM.ini") - So(err, ShouldBeNil) - So(cfg, ShouldNotBeNil) - - So(cfg.Section("author").Key("E-MAIL").String(), ShouldEqual, "u@gogs.io") - }) - - Convey("UTF-16-LE-BOM", func() { - cfg, err := Load("testdata/UTF-16-LE-BOM.ini") - So(err, ShouldBeNil) - So(cfg, ShouldNotBeNil) - }) - - Convey("UTF-16-BE-BOM", func() { - }) - }) -} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go deleted file mode 100644 index 94f7375ed4c..00000000000 --- a/vendor/github.com/go-ini/ini/section.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "errors" - "fmt" - "strings" -) - -// Section represents a config section. 
-type Section struct { - f *File - Comment string - name string - keys map[string]*Key - keyList []string - keysHash map[string]string - - isRawSection bool - rawBody string -} - -func newSection(f *File, name string) *Section { - return &Section{ - f: f, - name: name, - keys: make(map[string]*Key), - keyList: make([]string, 0, 10), - keysHash: make(map[string]string), - } -} - -// Name returns name of Section. -func (s *Section) Name() string { - return s.name -} - -// Body returns rawBody of Section if the section was marked as unparseable. -// It still follows the other rules of the INI format surrounding leading/trailing whitespace. -func (s *Section) Body() string { - return strings.TrimSpace(s.rawBody) -} - -// NewKey creates a new key to given section. -func (s *Section) NewKey(name, val string) (*Key, error) { - if len(name) == 0 { - return nil, errors.New("error creating new key: empty key name") - } else if s.f.options.Insensitive { - name = strings.ToLower(name) - } - - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - if inSlice(name, s.keyList) { - if s.f.options.AllowShadows { - if err := s.keys[name].addShadow(val); err != nil { - return nil, err - } - } else { - s.keys[name].value = val - } - return s.keys[name], nil - } - - s.keyList = append(s.keyList, name) - s.keys[name] = newKey(s, name, val) - s.keysHash[name] = val - return s.keys[name], nil -} - -// NewBooleanKey creates a new boolean type key to given section. -func (s *Section) NewBooleanKey(name string) (*Key, error) { - key, err := s.NewKey(name, "true") - if err != nil { - return nil, err - } - - key.isBooleanType = true - return key, nil -} - -// GetKey returns key in section by given name. -func (s *Section) GetKey(name string) (*Key, error) { - // FIXME: change to section level lock? - if s.f.BlockMode { - s.f.lock.RLock() - } - if s.f.options.Insensitive { - name = strings.ToLower(name) - } - key := s.keys[name] - if s.f.BlockMode { - s.f.lock.RUnlock() - } - - if key == nil { - // Check if it is a child-section. - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - return sec.GetKey(name) - } else { - break - } - } - return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) - } - return key, nil -} - -// HasKey returns true if section contains a key with given name. -func (s *Section) HasKey(name string) bool { - key, _ := s.GetKey(name) - return key != nil -} - -// Haskey is a backwards-compatible name for HasKey. -func (s *Section) Haskey(name string) bool { - return s.HasKey(name) -} - -// HasValue returns true if section contains given raw value. -func (s *Section) HasValue(value string) bool { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - for _, k := range s.keys { - if value == k.value { - return true - } - } - return false -} - -// Key assumes named Key exists in section and returns a zero-value when not. -func (s *Section) Key(name string) *Key { - key, err := s.GetKey(name) - if err != nil { - // It's OK here because the only possible error is empty key name, - // but if it's empty, this piece of code won't be executed. - key, _ = s.NewKey(name, "") - return key - } - return key -} - -// Keys returns list of keys of section. 
-func (s *Section) Keys() []*Key { - keys := make([]*Key, len(s.keyList)) - for i := range s.keyList { - keys[i] = s.Key(s.keyList[i]) - } - return keys -} - -// ParentKeys returns list of keys of parent section. -func (s *Section) ParentKeys() []*Key { - var parentKeys []*Key - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - parentKeys = append(parentKeys, sec.Keys()...) - } else { - break - } - - } - return parentKeys -} - -// KeyStrings returns list of key names of section. -func (s *Section) KeyStrings() []string { - list := make([]string, len(s.keyList)) - copy(list, s.keyList) - return list -} - -// KeysHash returns keys hash consisting of names and values. -func (s *Section) KeysHash() map[string]string { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - hash := map[string]string{} - for key, value := range s.keysHash { - hash[key] = value - } - return hash -} - -// DeleteKey deletes a key from section. -func (s *Section) DeleteKey(name string) { - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - for i, k := range s.keyList { - if k == name { - s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) - delete(s.keys, name) - return - } - } -} - -// ChildSections returns a list of child sections of current section. -// For example, "[parent.child1]" and "[parent.child12]" are child sections -// of section "[parent]". -func (s *Section) ChildSections() []*Section { - prefix := s.name + "." - children := make([]*Section, 0, 3) - for _, name := range s.f.sectionList { - if strings.HasPrefix(name, prefix) { - children = append(children, s.f.sections[name]) - } - } - return children -} diff --git a/vendor/github.com/go-ini/ini/section_test.go b/vendor/github.com/go-ini/ini/section_test.go deleted file mode 100644 index 80282c1979f..00000000000 --- a/vendor/github.com/go-ini/ini/section_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "strings" - "testing" - - . 
"github.com/smartystreets/goconvey/convey" -) - -func Test_Section(t *testing.T) { - Convey("Test CRD sections", t, func() { - cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") - So(err, ShouldBeNil) - So(cfg, ShouldNotBeNil) - - Convey("Get section strings", func() { - So(strings.Join(cfg.SectionStrings(), ","), ShouldEqual, "DEFAULT,author,package,package.sub,features,types,array,note,comments,advance") - }) - - Convey("Delete a section", func() { - cfg.DeleteSection("") - So(cfg.SectionStrings()[0], ShouldNotEqual, DEFAULT_SECTION) - }) - - Convey("Create new sections", func() { - cfg.NewSections("test", "test2") - _, err := cfg.GetSection("test") - So(err, ShouldBeNil) - _, err = cfg.GetSection("test2") - So(err, ShouldBeNil) - }) - }) -} - -func Test_SectionRaw(t *testing.T) { - Convey("Test section raw string", t, func() { - cfg, err := LoadSources( - LoadOptions{ - Insensitive: true, - UnparseableSections: []string{"core_lesson", "comments"}, - }, - "testdata/aicc.ini") - So(err, ShouldBeNil) - So(cfg, ShouldNotBeNil) - - Convey("Get section strings", func() { - So(strings.Join(cfg.SectionStrings(), ","), ShouldEqual, "DEFAULT,core,core_lesson,comments") - }) - - Convey("Validate non-raw section", func() { - val, err := cfg.Section("core").GetKey("lesson_status") - So(err, ShouldBeNil) - So(val.String(), ShouldEqual, "C") - }) - - Convey("Validate raw section", func() { - So(cfg.Section("core_lesson").Body(), ShouldEqual, `my lesson state data – 1111111111111111111000000000000000001110000 -111111111111111111100000000000111000000000 – end my lesson state data`) - }) - }) -} \ No newline at end of file diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go index eeb8dabaaca..c1184371010 100644 --- a/vendor/github.com/go-ini/ini/struct.go +++ b/vendor/github.com/go-ini/ini/struct.go @@ -19,7 +19,6 @@ import ( "errors" "fmt" "reflect" - "strings" "time" "unicode" ) @@ -77,80 +76,10 @@ func parseDelim(actual string) string { var reflectTime = reflect.TypeOf(time.Now()).Kind() -// setSliceWithProperType sets proper values to slice based on its type. 
-func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { - var strs []string - if allowShadow { - strs = key.StringsWithShadows(delim) - } else { - strs = key.Strings(delim) - } - - numVals := len(strs) - if numVals == 0 { - return nil - } - - var vals interface{} - var err error - - sliceOf := field.Type().Elem().Kind() - switch sliceOf { - case reflect.String: - vals = strs - case reflect.Int: - vals, err = key.parseInts(strs, true, false) - case reflect.Int64: - vals, err = key.parseInt64s(strs, true, false) - case reflect.Uint: - vals, err = key.parseUints(strs, true, false) - case reflect.Uint64: - vals, err = key.parseUint64s(strs, true, false) - case reflect.Float64: - vals, err = key.parseFloat64s(strs, true, false) - case reflectTime: - vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - if isStrict { - return err - } - - slice := reflect.MakeSlice(field.Type(), numVals, numVals) - for i := 0; i < numVals; i++ { - switch sliceOf { - case reflect.String: - slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) - case reflect.Int: - slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) - case reflect.Int64: - slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) - case reflect.Uint: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) - case reflect.Uint64: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) - case reflect.Float64: - slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) - case reflectTime: - slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) - } - } - field.Set(slice) - return nil -} - -func wrapStrictError(err error, isStrict bool) error { - if isStrict { - return err - } - return nil -} - // setWithProperType sets proper value to field based on its type, // but it does not return error for failing parsing, // because we want to use default value that is already assigned to strcut. 
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { +func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { switch t.Kind() { case reflect.String: if len(key.String()) == 0 { @@ -160,70 +89,78 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri case reflect.Bool: boolVal, err := key.Bool() if err != nil { - return wrapStrictError(err, isStrict) + return nil } field.SetBool(boolVal) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: durationVal, err := key.Duration() - // Skip zero value - if err == nil && int(durationVal) > 0 { + if err == nil { field.Set(reflect.ValueOf(durationVal)) return nil } intVal, err := key.Int64() if err != nil { - return wrapStrictError(err, isStrict) + return nil } field.SetInt(intVal) // byte is an alias for uint8, so supporting uint8 breaks support for byte case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: durationVal, err := key.Duration() - // Skip zero value - if err == nil && int(durationVal) > 0 { + if err == nil { field.Set(reflect.ValueOf(durationVal)) return nil } uintVal, err := key.Uint64() if err != nil { - return wrapStrictError(err, isStrict) + return nil } field.SetUint(uintVal) - case reflect.Float32, reflect.Float64: + case reflect.Float64: floatVal, err := key.Float64() if err != nil { - return wrapStrictError(err, isStrict) + return nil } field.SetFloat(floatVal) case reflectTime: timeVal, err := key.Time() if err != nil { - return wrapStrictError(err, isStrict) + return nil } field.Set(reflect.ValueOf(timeVal)) case reflect.Slice: - return setSliceWithProperType(key, field, delim, allowShadow, isStrict) + vals := key.Strings(delim) + numVals := len(vals) + if numVals == 0 { + return nil + } + + sliceOf := field.Type().Elem().Kind() + + var times []time.Time + if sliceOf == reflectTime { + times = key.Times(delim) + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(times[i])) + default: + slice.Index(i).Set(reflect.ValueOf(vals[i])) + } + } + field.Set(slice) default: return fmt.Errorf("unsupported type '%s'", t) } return nil } -func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) { - opts := strings.SplitN(tag, ",", 3) - rawName = opts[0] - if len(opts) > 1 { - omitEmpty = opts[1] == "omitempty" - } - if len(opts) > 2 { - allowShadow = opts[2] == "allowshadow" - } - return rawName, omitEmpty, allowShadow -} - -func (s *Section) mapTo(val reflect.Value, isStrict bool) error { +func (s *Section) mapTo(val reflect.Value) error { if val.Kind() == reflect.Ptr { val = val.Elem() } @@ -238,8 +175,7 @@ func (s *Section) mapTo(val reflect.Value, isStrict bool) error { continue } - rawName, _, allowShadow := parseTagOptions(tag) - fieldName := s.parseFieldName(tpField.Name, rawName) + fieldName := s.parseFieldName(tpField.Name, tag) if len(fieldName) == 0 || !field.CanSet() { continue } @@ -252,7 +188,7 @@ func (s *Section) mapTo(val reflect.Value, isStrict bool) error { if isAnonymous || isStruct { if sec, err := s.f.GetSection(fieldName); err == nil { - if err = sec.mapTo(field, isStrict); err != nil { + if err = sec.mapTo(field); err != nil { return fmt.Errorf("error mapping field(%s): %v", fieldName, err) } continue @@ -260,8 +196,7 @@ func (s *Section) mapTo(val reflect.Value, isStrict bool) error { } if key, err := 
s.GetKey(fieldName); err == nil { - delim := parseDelim(tpField.Tag.Get("delim")) - if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { + if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { return fmt.Errorf("error mapping field(%s): %v", fieldName, err) } } @@ -280,22 +215,7 @@ func (s *Section) MapTo(v interface{}) error { return errors.New("cannot map to non-pointer struct") } - return s.mapTo(val, false) -} - -// MapTo maps section to given struct in strict mode, -// which returns all possible error including value parsing error. -func (s *Section) StrictMapTo(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("cannot map to non-pointer struct") - } - - return s.mapTo(val, true) + return s.mapTo(val) } // MapTo maps file to given struct. @@ -303,12 +223,6 @@ func (f *File) MapTo(v interface{}) error { return f.Section("").MapTo(v) } -// MapTo maps file to given struct in strict mode, -// which returns all possible error including value parsing error. -func (f *File) StrictMapTo(v interface{}) error { - return f.Section("").StrictMapTo(v) -} - // MapTo maps data sources to given struct with name mapper. func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { cfg, err := Load(source, others...) @@ -319,104 +233,45 @@ func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, other return cfg.MapTo(v) } -// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, -// which returns all possible error including value parsing error. -func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { - cfg, err := Load(source, others...) - if err != nil { - return err - } - cfg.NameMapper = mapper - return cfg.StrictMapTo(v) -} - // MapTo maps data sources to given struct. func MapTo(v, source interface{}, others ...interface{}) error { return MapToWithMapper(v, nil, source, others...) } -// StrictMapTo maps data sources to given struct in strict mode, -// which returns all possible error including value parsing error. -func StrictMapTo(v, source interface{}, others ...interface{}) error { - return StrictMapToWithMapper(v, nil, source, others...) -} - -// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. -func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error { - slice := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - - var buf bytes.Buffer - sliceOf := field.Type().Elem().Kind() - for i := 0; i < field.Len(); i++ { - switch sliceOf { - case reflect.String: - buf.WriteString(slice.Index(i).String()) - case reflect.Int, reflect.Int64: - buf.WriteString(fmt.Sprint(slice.Index(i).Int())) - case reflect.Uint, reflect.Uint64: - buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) - case reflect.Float64: - buf.WriteString(fmt.Sprint(slice.Index(i).Float())) - case reflectTime: - buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - buf.WriteString(delim) - } - key.SetValue(buf.String()[:buf.Len()-1]) - return nil -} - -// reflectWithProperType does the opposite thing as setWithProperType. +// reflectWithProperType does the opposite thing with setWithProperType. 
func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { switch t.Kind() { case reflect.String: key.SetValue(field.String()) - case reflect.Bool: - key.SetValue(fmt.Sprint(field.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - key.SetValue(fmt.Sprint(field.Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - key.SetValue(fmt.Sprint(field.Uint())) - case reflect.Float32, reflect.Float64: - key.SetValue(fmt.Sprint(field.Float())) - case reflectTime: - key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float64, + reflectTime: + key.SetValue(fmt.Sprint(field)) case reflect.Slice: - return reflectSliceWithProperType(key, field, delim) + vals := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + + var buf bytes.Buffer + isTime := fmt.Sprint(field.Type()) == "[]time.Time" + for i := 0; i < field.Len(); i++ { + if isTime { + buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339)) + } else { + buf.WriteString(fmt.Sprint(vals.Index(i))) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-1]) default: return fmt.Errorf("unsupported type '%s'", t) } return nil } -// CR: copied from encoding/json/encode.go with modifications of time.Time support. -// TODO: add more test coverage. -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflectTime: - t, ok := v.Interface().(time.Time) - return ok && t.IsZero() - } - return false -} - func (s *Section) reflectFrom(val reflect.Value) error { if val.Kind() == reflect.Ptr { val = val.Elem() @@ -432,18 +287,13 @@ func (s *Section) reflectFrom(val reflect.Value) error { continue } - opts := strings.SplitN(tag, ",", 2) - if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) { - continue - } - - fieldName := s.parseFieldName(tpField.Name, opts[0]) + fieldName := s.parseFieldName(tpField.Name, tag) if len(fieldName) == 0 || !field.CanSet() { continue } if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || - (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { + (tpField.Type.Kind() == reflect.Struct) { // Note: The only error here is section doesn't exist. 
sec, err := s.f.GetSection(fieldName) if err != nil { @@ -451,7 +301,7 @@ func (s *Section) reflectFrom(val reflect.Value) error { sec, _ = s.f.NewSection(fieldName) } if err = sec.reflectFrom(field); err != nil { - return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) + return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) } continue } @@ -462,7 +312,7 @@ func (s *Section) reflectFrom(val reflect.Value) error { key, _ = s.NewKey(fieldName, "") } if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { - return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) + return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) } } diff --git a/vendor/github.com/go-ini/ini/struct_test.go b/vendor/github.com/go-ini/ini/struct_test.go index b8ba2529355..d865ad78eb7 100644 --- a/vendor/github.com/go-ini/ini/struct_test.go +++ b/vendor/github.com/go-ini/ini/struct_test.go @@ -15,8 +15,6 @@ package ini import ( - "bytes" - "fmt" "strings" "testing" "time" @@ -25,15 +23,10 @@ import ( ) type testNested struct { - Cities []string `delim:"|"` - Visits []time.Time - Years []int - Numbers []int64 - Ages []uint - Populations []uint64 - Coordinates []float64 - Note string - Unused int `ini:"-"` + Cities []string `delim:"|"` + Visits []time.Time + Note string + Unused int `ini:"-"` } type testEmbeded struct { @@ -51,9 +44,6 @@ type testStruct struct { *testEmbeded `ini:"grade"` Unused int `ini:"-"` Unsigned uint - Omitted bool `ini:"omitthis,omitempty"` - Shadows []string `ini:",,allowshadow"` - ShadowInts []int `ini:"Shadows,,allowshadow"` } const _CONF_DATA_STRUCT = ` @@ -64,18 +54,10 @@ Money = 1.25 Born = 1993-10-07T20:17:05Z Duration = 2h45m Unsigned = 3 -omitthis = true -Shadows = 1, 2 -Shadows = 3, 4 [Others] Cities = HangZhou|Boston Visits = 1993-10-07T20:17:05Z, 1993-10-07T20:17:05Z -Years = 1993,1994 -Numbers = 10010,10086 -Ages = 18,19 -Populations = 12345678,98765432 -Coordinates = 192.168,10.11 Note = Hello world! 
[grade] @@ -148,11 +130,6 @@ func Test_Struct(t *testing.T) { So(strings.Join(ts.Others.Cities, ","), ShouldEqual, "HangZhou,Boston") So(ts.Others.Visits[0].String(), ShouldEqual, t.String()) - So(fmt.Sprint(ts.Others.Years), ShouldEqual, "[1993 1994]") - So(fmt.Sprint(ts.Others.Numbers), ShouldEqual, "[10010 10086]") - So(fmt.Sprint(ts.Others.Ages), ShouldEqual, "[18 19]") - So(fmt.Sprint(ts.Others.Populations), ShouldEqual, "[12345678 98765432]") - So(fmt.Sprint(ts.Others.Coordinates), ShouldEqual, "[192.168 10.11]") So(ts.Others.Note, ShouldEqual, "Hello world!") So(ts.testEmbeded.GPA, ShouldEqual, 2.8) }) @@ -191,23 +168,6 @@ func Test_Struct(t *testing.T) { So(cfg.MapTo(&unsupport4{}), ShouldNotBeNil) }) - Convey("Map to omitempty field", func() { - ts := new(testStruct) - So(MapTo(ts, []byte(_CONF_DATA_STRUCT)), ShouldBeNil) - - So(ts.Omitted, ShouldEqual, true) - }) - - Convey("Map with shadows", func() { - cfg, err := LoadSources(LoadOptions{AllowShadows: true}, []byte(_CONF_DATA_STRUCT)) - So(err, ShouldBeNil) - ts := new(testStruct) - So(cfg.MapTo(ts), ShouldBeNil) - - So(strings.Join(ts.Shadows, " "), ShouldEqual, "1 2 3 4") - So(fmt.Sprintf("%v", ts.ShadowInts), ShouldEqual, "[1 2 3 4]") - }) - Convey("Map from invalid data source", func() { So(MapTo(&testStruct{}, "hi"), ShouldNotBeNil) }) @@ -229,106 +189,33 @@ func Test_Struct(t *testing.T) { }) }) - Convey("Map to struct in strict mode", t, func() { - cfg, err := Load([]byte(` -name=bruce -age=a30`)) - So(err, ShouldBeNil) - - type Strict struct { - Name string `ini:"name"` - Age int `ini:"age"` - } - s := new(Strict) - - So(cfg.Section("").StrictMapTo(s), ShouldNotBeNil) - }) - Convey("Reflect from struct", t, func() { type Embeded struct { - Dates []time.Time `delim:"|"` - Places []string - Years []int - Numbers []int64 - Ages []uint - Populations []uint64 - Coordinates []float64 - None []int + Dates []time.Time `delim:"|"` + Places []string + None []int } type Author struct { Name string `ini:"NAME"` Male bool Age int - Height uint GPA float64 - Date time.Time NeverMind string `ini:"-"` *Embeded `ini:"infos"` } - - t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z") - So(err, ShouldBeNil) - a := &Author{"Unknwon", true, 21, 100, 2.8, t, "", + a := &Author{"Unknwon", true, 21, 2.8, "", &Embeded{ - []time.Time{t, t}, + []time.Time{time.Now(), time.Now()}, []string{"HangZhou", "Boston"}, - []int{1993, 1994}, - []int64{10010, 10086}, - []uint{18, 19}, - []uint64{12345678, 98765432}, - []float64{192.168, 10.11}, []int{}, }} cfg := Empty() So(ReflectFrom(cfg, a), ShouldBeNil) - - var buf bytes.Buffer - _, err = cfg.WriteTo(&buf) - So(err, ShouldBeNil) - So(buf.String(), ShouldEqual, `NAME = Unknwon -Male = true -Age = 21 -Height = 100 -GPA = 2.8 -Date = 1993-10-07T20:17:05Z - -[infos] -Dates = 1993-10-07T20:17:05Z|1993-10-07T20:17:05Z -Places = HangZhou,Boston -Years = 1993,1994 -Numbers = 10010,10086 -Ages = 18,19 -Populations = 12345678,98765432 -Coordinates = 192.168,10.11 -None = - -`) + cfg.SaveTo("testdata/conf_reflect.ini") Convey("Reflect from non-point struct", func() { So(ReflectFrom(cfg, Author{}), ShouldNotBeNil) }) - - Convey("Reflect from struct with omitempty", func() { - cfg := Empty() - type SpecialStruct struct { - FirstName string `ini:"first_name"` - LastName string `ini:"last_name"` - JustOmitMe string `ini:"omitempty"` - LastLogin time.Time `ini:"last_login,omitempty"` - LastLogin2 time.Time `ini:",omitempty"` - NotEmpty int `ini:"omitempty"` - } - - So(ReflectFrom(cfg, &SpecialStruct{FirstName: 
"John", LastName: "Doe", NotEmpty: 9}), ShouldBeNil) - - var buf bytes.Buffer - _, err = cfg.WriteTo(&buf) - So(buf.String(), ShouldEqual, `first_name = John -last_name = Doe -omitempty = 9 - -`) - }) }) } diff --git a/vendor/github.com/prometheus/client_golang/.travis.yml b/vendor/github.com/prometheus/client_golang/.travis.yml index e9bca4ec76d..bf0716ace15 100644 --- a/vendor/github.com/prometheus/client_golang/.travis.yml +++ b/vendor/github.com/prometheus/client_golang/.travis.yml @@ -2,9 +2,7 @@ sudo: false language: go go: - - 1.7.x - - 1.8.x - - 1.9.x + - 1.4 script: - - go test -short ./... + - go test -short ./... diff --git a/vendor/github.com/prometheus/client_golang/AUTHORS.md b/vendor/github.com/prometheus/client_golang/AUTHORS.md new file mode 100644 index 00000000000..b67a4f5a41b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/AUTHORS.md @@ -0,0 +1,18 @@ +The Prometheus project was started by Matt T. Proud (emeritus) and +Julius Volz in 2012. + +Maintainers of this repository: + +* Björn Rabenstein + +The following individuals have contributed code to this repository +(listed in alphabetical order): + +* Bernerd Schaefer +* Björn Rabenstein +* Daniel Bornkessel +* Jeff Younker +* Julius Volz +* Matt T. Proud +* Tobias Schmidt + diff --git a/vendor/github.com/prometheus/client_golang/CHANGELOG.md b/vendor/github.com/prometheus/client_golang/CHANGELOG.md index 330788a4ee4..5c662f9f521 100644 --- a/vendor/github.com/prometheus/client_golang/CHANGELOG.md +++ b/vendor/github.com/prometheus/client_golang/CHANGELOG.md @@ -1,26 +1,3 @@ -## 0.8.0 / 2016-08-17 -* [CHANGE] Registry is doing more consistency checks. This might break - existing setups that used to export inconsistent metrics. -* [CHANGE] Pushing to Pushgateway moved to package `push` and changed to allow - arbitrary grouping. -* [CHANGE] Removed `SelfCollector`. -* [CHANGE] Removed `PanicOnCollectError` and `EnableCollectChecks` methods. -* [CHANGE] Moved packages to the prometheus/common repo: `text`, `model`, - `extraction`. -* [CHANGE] Deprecated a number of functions. -* [FEATURE] Allow custom registries. Added `Registerer` and `Gatherer` - interfaces. -* [FEATURE] Separated HTTP exposition, allowing custom HTTP handlers (package - `promhttp`) and enabling the creation of other exposition mechanisms. -* [FEATURE] `MustRegister` is variadic now, allowing registration of many - collectors in one call. -* [FEATURE] Added HTTP API v1 package. -* [ENHANCEMENT] Numerous documentation improvements. -* [ENHANCEMENT] Improved metric sorting. -* [ENHANCEMENT] Inlined fnv64a hashing for improved performance. -* [ENHANCEMENT] Several test improvements. -* [BUGFIX] Handle collisions in MetricVec. - ## 0.7.0 / 2015-07-27 * [CHANGE] Rename ExporterLabelPrefix to ExportedLabelPrefix. * [BUGFIX] Closed gaps in metric consistency check. diff --git a/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md b/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md index 40503edbf18..5705f0fbea2 100644 --- a/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md +++ b/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md @@ -2,9 +2,9 @@ Prometheus uses GitHub to manage reviews of pull requests. -* If you have a trivial fix or improvement, go ahead and create a pull request, - addressing (with `@...`) the maintainer of this repository (see - [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. 
+* If you have a trivial fix or improvement, go ahead and create a pull + request, addressing (with `@...`) one or more of the maintainers + (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. * If you plan to do something more involved, first discuss your ideas on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). diff --git a/vendor/github.com/prometheus/client_golang/MAINTAINERS.md b/vendor/github.com/prometheus/client_golang/MAINTAINERS.md deleted file mode 100644 index 3ede55fe186..00000000000 --- a/vendor/github.com/prometheus/client_golang/MAINTAINERS.md +++ /dev/null @@ -1 +0,0 @@ -* Björn Rabenstein diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE index dd878a30ee9..37e4a7d410e 100644 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -7,6 +7,11 @@ SoundCloud Ltd. (http://soundcloud.com/). The following components are included in this product: +goautoneg +http://bitbucket.org/ww/goautoneg +Copyright 2011, Open Knowledge Foundation Ltd. +See README.txt for license details. + perks - a fork of https://github.com/bmizerany/perks https://github.com/beorn7/perks Copyright 2013-2015 Blake Mizerany, Björn Rabenstein diff --git a/vendor/github.com/prometheus/client_golang/README.md b/vendor/github.com/prometheus/client_golang/README.md index 0eb0df1dff8..557eacf5a82 100644 --- a/vendor/github.com/prometheus/client_golang/README.md +++ b/vendor/github.com/prometheus/client_golang/README.md @@ -1,15 +1,12 @@ # Prometheus Go client library [![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang) -[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/client_golang)](https://goreportcard.com/report/github.com/prometheus/client_golang) This is the [Go](http://golang.org) client library for [Prometheus](http://prometheus.io). It has two separate parts, one for instrumenting application code, and one for creating clients that talk to the Prometheus HTTP API. -__This library requires Go1.7 or later.__ - ## Instrumenting applications [![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus) @@ -32,8 +29,7 @@ The [`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus) contains the client for the [Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you -to write Go applications that query time series data from a Prometheus -server. It is still in alpha stage. +to write Go applications that query time series data from a Prometheus server. ## Where is `model`, `extraction`, and `text`? 
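The README hunk above describes the `api/prometheus` client for querying a Prometheus server; that client is a thin wrapper over the plain HTTP API, so an instant query can be sketched with nothing but the standard library. A minimal, illustrative sketch, not taken from this patch; the server address `localhost:9090` and the query `up` are assumptions:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	// Hit the instant-query endpoint directly; the api/prometheus package
	// wraps this same HTTP API.
	q := url.Values{"query": {"up"}}
	resp, err := http.Get("http://localhost:9090/api/v1/query?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // JSON-encoded instant-query result.
}
```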
diff --git a/vendor/github.com/prometheus/client_golang/VERSION b/vendor/github.com/prometheus/client_golang/VERSION index a3df0a6959e..faef31a4357 100644 --- a/vendor/github.com/prometheus/client_golang/VERSION +++ b/vendor/github.com/prometheus/client_golang/VERSION @@ -1 +1 @@ -0.8.0 +0.7.0 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md index 44986bff06b..81032bed886 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/README.md +++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md @@ -1 +1,53 @@ -See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus). +# Overview +This is the [Prometheus](http://www.prometheus.io) telemetric +instrumentation client [Go](http://golang.org) client library. It +enable authors to define process-space metrics for their servers and +expose them through a web service interface for extraction, +aggregation, and a whole slew of other post processing techniques. + +# Installing + $ go get github.com/prometheus/client_golang/prometheus + +# Example +```go +package main + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + indexed = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "my_company", + Subsystem: "indexer", + Name: "documents_indexed", + Help: "The number of documents indexed.", + }) + size = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "my_company", + Subsystem: "storage", + Name: "documents_total_size_bytes", + Help: "The total size of all documents in the storage.", + }) +) + +func main() { + http.Handle("/metrics", prometheus.Handler()) + + indexed.Inc() + size.Set(5) + + http.ListenAndServe(":8080", nil) +} + +func init() { + prometheus.MustRegister(indexed) + prometheus.MustRegister(size) +} +``` + +# Documentation + +[![GoDoc](https://godoc.org/github.com/prometheus/client_golang?status.png)](https://godoc.org/github.com/prometheus/client_golang) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go b/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go index faad39b4c60..6ae7333fcc9 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go @@ -14,7 +14,6 @@ package prometheus import ( - "sync" "testing" ) @@ -33,29 +32,6 @@ func BenchmarkCounterWithLabelValues(b *testing.B) { } } -func BenchmarkCounterWithLabelValuesConcurrent(b *testing.B) { - m := NewCounterVec( - CounterOpts{ - Name: "benchmark_counter", - Help: "A counter to benchmark it.", - }, - []string{"one", "two", "three"}, - ) - b.ReportAllocs() - b.ResetTimer() - wg := sync.WaitGroup{} - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - for j := 0; j < b.N/10; j++ { - m.WithLabelValues("eins", "zwei", "drei").Inc() - } - wg.Done() - }() - } - wg.Wait() -} - func BenchmarkCounterWithMappedLabels(b *testing.B) { m := NewCounterVec( CounterOpts{ @@ -129,9 +105,8 @@ func BenchmarkGaugeNoLabels(b *testing.B) { func BenchmarkSummaryWithLabelValues(b *testing.B) { m := NewSummaryVec( SummaryOpts{ - Name: "benchmark_summary", - Help: "A summary to benchmark it.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + Name: "benchmark_summary", + Help: "A summary to benchmark it.", }, []string{"one", "two", "three"}, ) @@ -144,9 +119,8 
@@ func BenchmarkSummaryWithLabelValues(b *testing.B) { func BenchmarkSummaryNoLabels(b *testing.B) { m := NewSummary(SummaryOpts{ - Name: "benchmark_summary", - Help: "A summary to benchmark it.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + Name: "benchmark_summary", + Help: "A summary to benchmark it.", }, ) b.ReportAllocs() diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go index 623d3d83fef..c04688009f5 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -15,15 +15,15 @@ package prometheus // Collector is the interface implemented by anything that can be used by // Prometheus to collect metrics. A Collector has to be registered for -// collection. See Registerer.Register. +// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet. // -// The stock metrics provided by this package (Gauge, Counter, Summary, -// Histogram, Untyped) are also Collectors (which only ever collect one metric, -// namely itself). An implementer of Collector may, however, collect multiple -// metrics in a coordinated fashion and/or create metrics on the fly. Examples -// for collectors already implemented in this library are the metric vectors -// (i.e. collection of multiple instances of the same Metric but with different -// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. +// The stock metrics provided by this package (like Gauge, Counter, Summary) are +// also Collectors (which only ever collect one metric, namely itself). An +// implementer of Collector may, however, collect multiple metrics in a +// coordinated fashion and/or create metrics on the fly. Examples for collectors +// already implemented in this library are the metric vectors (i.e. collection +// of multiple instances of the same Metric but with different label values) +// like GaugeVec or SummaryVec, and the ExpvarCollector. type Collector interface { // Describe sends the super-set of all possible descriptors of metrics // collected by this Collector to the provided channel and returns once @@ -37,39 +37,39 @@ type Collector interface { // executing this method, it must send an invalid descriptor (created // with NewInvalidDesc) to signal the error to the registry. Describe(chan<- *Desc) - // Collect is called by the Prometheus registry when collecting - // metrics. The implementation sends each collected metric via the - // provided channel and returns once the last metric has been sent. The - // descriptor of each sent metric is one of those returned by - // Describe. Returned metrics that share the same descriptor must differ - // in their variable label values. This method may be called - // concurrently and must therefore be implemented in a concurrency safe - // way. Blocking occurs at the expense of total performance of rendering - // all registered metrics. Ideally, Collector implementations support - // concurrent readers. + // Collect is called by Prometheus when collecting metrics. The + // implementation sends each collected metric via the provided channel + // and returns once the last metric has been sent. The descriptor of + // each sent metric is one of those returned by Describe. Returned + // metrics that share the same descriptor must differ in their variable + // label values. 
This method may be called concurrently and must + // therefore be implemented in a concurrency safe way. Blocking occurs + // at the expense of total performance of rendering all registered + // metrics. Ideally, Collector implementations support concurrent + // readers. Collect(chan<- Metric) } -// selfCollector implements Collector for a single Metric so that the Metric -// collects itself. Add it as an anonymous field to a struct that implements -// Metric, and call init with the Metric itself as an argument. -type selfCollector struct { +// SelfCollector implements Collector for a single Metric so that that the +// Metric collects itself. Add it as an anonymous field to a struct that +// implements Metric, and call Init with the Metric itself as an argument. +type SelfCollector struct { self Metric } -// init provides the selfCollector with a reference to the metric it is supposed +// Init provides the SelfCollector with a reference to the metric it is supposed // to collect. It is usually called within the factory function to create a // metric. See example. -func (c *selfCollector) init(self Metric) { +func (c *SelfCollector) Init(self Metric) { c.self = self } // Describe implements Collector. -func (c *selfCollector) Describe(ch chan<- *Desc) { +func (c *SelfCollector) Describe(ch chan<- *Desc) { ch <- c.self.Desc() } // Collect implements Collector. -func (c *selfCollector) Collect(ch chan<- Metric) { +func (c *SelfCollector) Collect(ch chan<- Metric) { ch <- c.self } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index 273db5f8159..a2952d1c881 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -15,6 +15,7 @@ package prometheus import ( "errors" + "hash/fnv" ) // Counter is a Metric that represents a single numerical value that only ever @@ -30,8 +31,13 @@ type Counter interface { Metric Collector - // Inc increments the counter by 1. Use Add to increment it by arbitrary - // non-negative values. + // Set is used to set the Counter to an arbitrary value. It is only used + // if you have to transfer a value from an external counter into this + // Prometheus metric. Do not use it for regular handling of a + // Prometheus counter (as it can be used to break the contract of + // monotonically increasing values). + Set(float64) + // Inc increments the counter by 1. Inc() // Add adds the given value to the counter. It panics if the value is < // 0. @@ -50,7 +56,7 @@ func NewCounter(opts CounterOpts) Counter { opts.ConstLabels, ) result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} - result.init(result) // Init self-collection. + result.Init(result) // Init self-collection. return result } @@ -70,12 +76,16 @@ func (c *counter) Add(v float64) { // if you want to count the same thing partitioned by various dimensions // (e.g. number of HTTP requests, partitioned by response code and // method). Create instances with NewCounterVec. +// +// CounterVec embeds MetricVec. See there for a full list of methods with +// detailed documentation. type CounterVec struct { - *metricVec + MetricVec } // NewCounterVec creates a new CounterVec based on the provided CounterOpts and -// partitioned by the given label names. +// partitioned by the given label names. At least one label name must be +// provided. 
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -84,63 +94,39 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { opts.ConstLabels, ) return &CounterVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { - result := &counter{value: value{ - desc: desc, - valType: CounterValue, - labelPairs: makeLabelPairs(desc, lvs), - }} - result.init(result) // Init self-collection. - return result - }), + MetricVec: MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + hash: fnv.New64a(), + newMetric: func(lvs ...string) Metric { + result := &counter{value: value{ + desc: desc, + valType: CounterValue, + labelPairs: makeLabelPairs(desc, lvs), + }} + result.Init(result) // Init self-collection. + return result + }, + }, } } -// GetMetricWithLabelValues returns the Counter for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Counter is created. -// -// It is possible to call this method without using the returned Counter to only -// create the new Counter but leave it at its starting value 0. See also the -// SummaryVec example. -// -// Keeping the Counter for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Counter from the CounterVec. In that case, -// the Counter will still exist, but it will not be exported anymore, even if a -// Counter with the same label values is created later. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Counter and not a +// Metric so that no type conversion is required. func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { - metric, err := m.metricVec.getMetricWithLabelValues(lvs...) + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { return metric.(Counter), err } return nil, err } -// GetMetricWith returns the Counter for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Counter is created. Implications of -// creating a Counter without using it and keeping the Counter for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc. -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Counter and not a Metric so that no +// type conversion is required. 
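The hunk above rewires `WithLabelValues` and the `GetMetricWith*` accessors of `CounterVec`. Roughly, they are used as in the following hedged sketch; the metric and label names are borrowed from the examples elsewhere in this package, and the concrete values are illustrative only:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	httpReqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "How many HTTP requests processed, partitioned by status code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(httpReqs)

	// WithLabelValues panics on a label mismatch; the GetMetricWith*
	// variants return the error instead so it can be handled.
	httpReqs.WithLabelValues("404", "GET").Inc()
	if c, err := httpReqs.GetMetricWithLabelValues("200", "POST"); err == nil {
		c.Add(42)
	}
}
```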
func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { - metric, err := m.metricVec.getMetricWith(labels) + metric, err := m.MetricVec.GetMetricWith(labels) if metric != nil { return metric.(Counter), err } @@ -152,14 +138,14 @@ func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { // error, WithLabelValues allows shortcuts like // myVec.WithLabelValues("404", "GET").Add(42) func (m *CounterVec) WithLabelValues(lvs ...string) Counter { - return m.metricVec.withLabelValues(lvs...).(Counter) + return m.MetricVec.WithLabelValues(lvs...).(Counter) } // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. By not returning an error, With allows shortcuts like // myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) func (m *CounterVec) With(labels Labels) Counter { - return m.metricVec.with(labels).(Counter) + return m.MetricVec.With(labels).(Counter) } // CounterFunc is a Counter whose value is determined at collect time by calling a diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go b/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go index 8d5cd0b6f67..67391a23aa8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go @@ -14,7 +14,6 @@ package prometheus import ( - "fmt" "math" "testing" @@ -57,58 +56,3 @@ func decreaseCounter(c *counter) (err error) { c.Add(-1) return nil } - -func TestCounterVecGetMetricWithInvalidLabelValues(t *testing.T) { - testCases := []struct { - desc string - labels Labels - }{ - { - desc: "non utf8 label value", - labels: Labels{"a": "\xFF"}, - }, - { - desc: "not enough label values", - labels: Labels{}, - }, - { - desc: "too many label values", - labels: Labels{"a": "1", "b": "2"}, - }, - } - - for _, test := range testCases { - counterVec := NewCounterVec(CounterOpts{ - Name: "test", - }, []string{"a"}) - - labelValues := make([]string, len(test.labels)) - for _, val := range test.labels { - labelValues = append(labelValues, val) - } - - expectPanic(t, func() { - counterVec.WithLabelValues(labelValues...) - }, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc)) - expectPanic(t, func() { - counterVec.With(test.labels) - }, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc)) - - if _, err := counterVec.GetMetricWithLabelValues(labelValues...); err == nil { - t.Errorf("GetMetricWithLabelValues: expected error because: %s", test.desc) - } - if _, err := counterVec.GetMetricWith(test.labels); err == nil { - t.Errorf("GetMetricWith: expected error because: %s", test.desc) - } - } -} - -func expectPanic(t *testing.T, op func(), errorMsg string) { - defer func() { - if err := recover(); err == nil { - t.Error(errorMsg) - } - }() - - op() -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 920abc92019..fcde784d640 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -1,30 +1,37 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package prometheus import ( + "bytes" "errors" "fmt" + "hash/fnv" + "regexp" "sort" "strings" "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" ) +var ( + metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) + labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") +) + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. +const reservedLabelPrefix = "__" + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + // Desc is the descriptor used by every Prometheus Metric. It is essentially // the immutable meta-data of a Metric. The normal Metric implementations // included in this package manage their Desc under the hood. Users only have to @@ -60,7 +67,7 @@ type Desc struct { // Help string. Each Desc with the same fqName must have the same // dimHash. dimHash uint64 - // err is an error that occurred during construction. It is reported on + // err is an error that occured during construction. It is reported on // registration time. err error } @@ -85,7 +92,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * d.err = errors.New("empty help string") return d } - if !model.IsValidMetricName(model.LabelValue(fqName)) { + if !metricNameRE.MatchString(fqName) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) return d } @@ -109,12 +116,6 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * for _, labelName := range labelNames { labelValues = append(labelValues, constLabels[labelName]) } - // Validate the const label values. They can't have a wrong cardinality, so - // use in len(labelValues) as expectedNumberOfValues. - if err := validateLabelValues(labelValues, len(labelValues)); err != nil { - d.err = err - return d - } // Now add the variable label names, but prefix them with something that // cannot be in a regular label name. That prevents matching the label // dimension with a different mix between preset and variable labels. @@ -130,25 +131,31 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * d.err = errors.New("duplicate label names") return d } - - vh := hashNew() + h := fnv.New64a() + var b bytes.Buffer // To copy string contents into, avoiding []byte allocations. for _, val := range labelValues { - vh = hashAdd(vh, val) - vh = hashAddByte(vh, separatorByte) + b.Reset() + b.WriteString(val) + b.WriteByte(separatorByte) + h.Write(b.Bytes()) } - d.id = vh + d.id = h.Sum64() // Sort labelNames so that order doesn't matter for the hash. sort.Strings(labelNames) // Now hash together (in this order) the help string and the sorted // label names. 
- lh := hashNew() - lh = hashAdd(lh, help) - lh = hashAddByte(lh, separatorByte) + h.Reset() + b.Reset() + b.WriteString(help) + b.WriteByte(separatorByte) + h.Write(b.Bytes()) for _, labelName := range labelNames { - lh = hashAdd(lh, labelName) - lh = hashAddByte(lh, separatorByte) + b.Reset() + b.WriteString(labelName) + b.WriteByte(separatorByte) + h.Write(b.Bytes()) } - d.dimHash = lh + d.dimHash = h.Sum64() d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) for n, v := range constLabels { @@ -187,3 +194,8 @@ func (d *Desc) String() string { d.variableLabels, ) } + +func checkLabelName(l string) bool { + return labelNameRE.MatchString(l) && + !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc_test.go b/vendor/github.com/prometheus/client_golang/prometheus/desc_test.go deleted file mode 100644 index 2f962652c8f..00000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package prometheus - -import ( - "testing" -) - -func TestNewDescInvalidLabelValues(t *testing.T) { - desc := NewDesc( - "sample_label", - "sample label", - nil, - Labels{"a": "\xFF"}, - ) - if desc.err == nil { - t.Errorf("NewDesc: expected error because: %s", desc.err) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 36ef155670c..425fe8793cd 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -11,26 +11,25 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package prometheus provides metrics primitives to instrument code for -// monitoring. It also offers a registry for metrics. Sub-packages allow to -// expose the registered metrics via HTTP (package promhttp) or push them to a -// Pushgateway (package push). +// Package prometheus provides embeddable metric primitives for servers and +// standardized exposition of telemetry through a web services interface. // // All exported functions and methods are safe to be used concurrently unless // specified otherwise. // -// A Basic Example +// To expose metrics registered with the Prometheus registry, an HTTP server +// needs to know about the Prometheus handler. The usual endpoint is "/metrics". 
// -// As a starting point, a very basic usage example: +// http.Handle("/metrics", prometheus.Handler()) +// +// As a starting point a very basic usage example: // // package main // // import ( -// "log" // "net/http" // // "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promhttp" // ) // // var ( @@ -38,149 +37,73 @@ // Name: "cpu_temperature_celsius", // Help: "Current temperature of the CPU.", // }) -// hdFailures = prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ) +// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }) // ) // // func init() { -// // Metrics have to be registered to be exposed: // prometheus.MustRegister(cpuTemp) // prometheus.MustRegister(hdFailures) // } // // func main() { // cpuTemp.Set(65.3) -// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// hdFailures.Inc() // -// // The Handler function provides a default handler to expose metrics -// // via an HTTP server. "/metrics" is the usual endpoint for that. -// http.Handle("/metrics", promhttp.Handler()) -// log.Fatal(http.ListenAndServe(":8080", nil)) +// http.Handle("/metrics", prometheus.Handler()) +// http.ListenAndServe(":8080", nil) // } // // -// This is a complete program that exports two metrics, a Gauge and a Counter, -// the latter with a label attached to turn it into a (one-dimensional) vector. -// -// Metrics -// -// The number of exported identifiers in this package might appear a bit -// overwhelming. However, in addition to the basic plumbing shown in the example -// above, you only need to understand the different metric types and their -// vector versions for basic usage. -// -// Above, you have already touched the Counter and the Gauge. There are two more -// advanced metric types: the Summary and Histogram. A more thorough description -// of those four metric types can be found in the Prometheus docs: -// https://prometheus.io/docs/concepts/metric_types/ +// This is a complete program that exports two metrics, a Gauge and a Counter. +// It also exports some stats about the HTTP usage of the /metrics +// endpoint. (See the Handler function for more detail.) // -// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the -// Prometheus server not to assume anything about its type. +// Two more advanced metric types are the Summary and Histogram. // -// In addition to the fundamental metric types Gauge, Counter, Summary, -// Histogram, and Untyped, a very important part of the Prometheus data model is -// the partitioning of samples along dimensions called labels, which results in +// In addition to the fundamental metric types Gauge, Counter, Summary, and +// Histogram, a very important part of the Prometheus data model is the +// partitioning of samples along dimensions called labels, which results in // metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, -// HistogramVec, and UntypedVec. -// -// While only the fundamental metric types implement the Metric interface, both -// the metrics and their vector versions implement the Collector interface. A -// Collector manages the collection of a number of Metrics, but for convenience, -// a Metric can also “collect itself”. 
Note that Gauge, Counter, Summary, -// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, -// SummaryVec, HistogramVec, and UntypedVec are not. -// -// To create instances of Metrics and their vector versions, you need a suitable -// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or -// UntypedOpts. -// -// Custom Collectors and constant Metrics -// -// While you could create your own implementations of Metric, most likely you -// will only ever implement the Collector interface on your own. At a first -// glance, a custom Collector seems handy to bundle Metrics for common -// registration (with the prime example of the different metric vectors above, -// which bundle all the metrics of the same name but with different labels). -// -// There is a more involved use case, too: If you already have metrics -// available, created outside of the Prometheus context, you don't need the -// interface of the various Metric types. You essentially want to mirror the -// existing numbers into Prometheus Metrics during collection. An own -// implementation of the Collector interface is perfect for that. You can create -// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and -// NewConstSummary (and their respective Must… versions). That will happen in -// the Collect method. The Describe method has to return separate Desc -// instances, representative of the “throw-away” metrics to be created later. -// NewDesc comes in handy to create those Desc instances. -// -// The Collector example illustrates the use case. You can also look at the -// source code of the processCollector (mirroring process metrics), the -// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar -// metrics) as examples that are used in this package itself. -// -// If you just need to call a function to get a single float value to collect as -// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting -// shortcuts. -// -// Advanced Uses of the Registry -// -// While MustRegister is the by far most common way of registering a Collector, -// sometimes you might want to handle the errors the registration might cause. -// As suggested by the name, MustRegister panics if an error occurs. With the -// Register function, the error is returned and can be handled. -// -// An error is returned if the registered Collector is incompatible or -// inconsistent with already registered metrics. The registry aims for -// consistency of the collected metrics according to the Prometheus data model. -// Inconsistencies are ideally detected at registration time, not at collect -// time. The former will usually be detected at start-up time of a program, -// while the latter will only happen at scrape time, possibly not even on the -// first scrape if the inconsistency only becomes relevant later. That is the -// main reason why a Collector and a Metric have to describe themselves to the -// registry. -// -// So far, everything we did operated on the so-called default registry, as it -// can be found in the global DefaultRegisterer variable. With NewRegistry, you -// can create a custom registry, or you can even implement the Registerer or -// Gatherer interfaces yourself. The methods Register and Unregister work in the -// same way on a custom registry as the global functions Register and Unregister -// on the default registry. 
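The documentation lines removed above describe custom registries from the newer client_golang API. A minimal sketch of that newer usage, assuming client_golang 0.8 or later with the promhttp sub-package; the gauge reuses the pond-temperature example found elsewhere in this package:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A registry separate from the global default registry.
	reg := prometheus.NewRegistry()

	temps := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "pond_temperature_celsius",
		Help: "The temperature of the frog pond.",
	})
	reg.MustRegister(temps)
	temps.Set(21.5)

	// Expose only the metrics gathered by this registry.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```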
-// -// There are a number of uses for custom registries: You can use registries with -// special properties, see NewPedanticRegistry. You can avoid global state, as -// it is imposed by the DefaultRegisterer. You can use multiple registries at -// the same time to expose different metrics in different ways. You can use -// separate registries for testing purposes. -// -// Also note that the DefaultRegisterer comes registered with a Collector for Go -// runtime metrics (via NewGoCollector) and a Collector for process metrics (via -// NewProcessCollector). With a custom registry, you are in control and decide -// yourself about the Collectors to register. -// -// HTTP Exposition -// -// The Registry implements the Gatherer interface. The caller of the Gather -// method can then expose the gathered metrics in some way. Usually, the metrics -// are served via HTTP on the /metrics endpoint. That's happening in the example -// above. The tools to expose metrics via HTTP are in the promhttp sub-package. -// (The top-level functions in the prometheus package are deprecated.) -// -// Pushing to the Pushgateway -// -// Function for pushing to the Pushgateway can be found in the push sub-package. -// -// Graphite Bridge -// -// Functions and examples to push metrics from a Gatherer to Graphite can be -// found in the graphite sub-package. -// -// Other Means of Exposition -// -// More ways of exposing metrics can easily be added by following the approaches -// of the existing implementations. +// and HistogramVec. +// +// Those are all the parts needed for basic usage. Detailed documentation and +// examples are provided below. +// +// Everything else this package offers is essentially for "power users" only. A +// few pointers to "power user features": +// +// All the various ...Opts structs have a ConstLabels field for labels that +// never change their value (which is only useful under special circumstances, +// see documentation of the Opts type). +// +// The Untyped metric behaves like a Gauge, but signals the Prometheus server +// not to assume anything about its type. +// +// Functions to fine-tune how the metric registry works: EnableCollectChecks, +// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook. +// +// For custom metric collection, there are two entry points: Custom Metric +// implementations and custom Collector implementations. A Metric is the +// fundamental unit in the Prometheus data model: a sample at a point in time +// together with its meta-data (like its fully-qualified name and any number of +// pairs of label name and label value) that knows how to marshal itself into a +// data transfer object (aka DTO, implemented as a protocol buffer). A Collector +// gets registered with the Prometheus registry and manages the collection of +// one or more Metrics. Many parts of this package are building blocks for +// Metrics and Collectors. Desc is the metric descriptor, actually used by all +// metrics under the hood, and by Collectors to describe the Metrics to be +// collected, but only to be dealt with by users if they implement their own +// Metrics or Collectors. To create a Desc, the BuildFQName function will come +// in handy. 
Other useful components for Metric and Collector implementation +// include: LabelPairSorter to sort the DTO version of label pairs, +// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at +// collection time, MetricVec to bundle custom Metrics into a metric vector +// Collector, SelfCollector to make a custom Metric collect itself. +// +// A good example for a custom Collector is the ExpVarCollector included in this +// package, which exports variables exported via the "expvar" package as +// Prometheus metrics. package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go b/vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go index 260c1b52ded..6f3e215d47b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go @@ -13,7 +13,11 @@ package prometheus_test -import "github.com/prometheus/client_golang/prometheus" +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" +) // ClusterManager is an example for a system that might have been built without // Prometheus in mind. It models a central manager of jobs running in a @@ -25,9 +29,10 @@ import "github.com/prometheus/client_golang/prometheus" // make use of ConstLabels to be able to register each ClusterManager instance // with Prometheus. type ClusterManager struct { - Zone string - OOMCountDesc *prometheus.Desc - RAMUsageDesc *prometheus.Desc + Zone string + OOMCount *prometheus.CounterVec + RAMUsage *prometheus.GaugeVec + mtx sync.Mutex // Protects OOMCount and RAMUsage. // ... many more fields } @@ -50,69 +55,76 @@ func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() ( return } -// Describe simply sends the two Descs in the struct to the channel. +// Describe faces the interesting challenge that the two metric vectors that are +// used in this example are already Collectors themselves. However, thanks to +// the use of channels, it is really easy to "chain" Collectors. Here we simply +// call the Describe methods of the two metric vectors. func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) { - ch <- c.OOMCountDesc - ch <- c.RAMUsageDesc + c.OOMCount.Describe(ch) + c.RAMUsage.Describe(ch) } // Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it -// creates constant metrics for each host on the fly based on the returned data. -// -// Note that Collect could be called concurrently, so we depend on -// ReallyExpensiveAssessmentOfTheSystemState to be concurrency-safe. +// sets the retrieved values in the two metric vectors and then sends all their +// metrics to the channel (again using a chaining technique as in the Describe +// method). Since Collect could be called multiple times concurrently, that part +// is protected by a mutex. 
func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) { oomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState() + c.mtx.Lock() + defer c.mtx.Unlock() for host, oomCount := range oomCountByHost { - ch <- prometheus.MustNewConstMetric( - c.OOMCountDesc, - prometheus.CounterValue, - float64(oomCount), - host, - ) + c.OOMCount.WithLabelValues(host).Set(float64(oomCount)) } for host, ramUsage := range ramUsageByHost { - ch <- prometheus.MustNewConstMetric( - c.RAMUsageDesc, - prometheus.GaugeValue, - ramUsage, - host, - ) + c.RAMUsage.WithLabelValues(host).Set(ramUsage) } + c.OOMCount.Collect(ch) + c.RAMUsage.Collect(ch) + // All metrics in OOMCount and RAMUsage are sent to the channel now. We + // can safely reset the two metric vectors now, so that we can start + // fresh in the next Collect cycle. (Imagine a host disappears from the + // cluster. If we did not reset here, its Metric would stay in the + // metric vectors forever.) + c.OOMCount.Reset() + c.RAMUsage.Reset() } -// NewClusterManager creates the two Descs OOMCountDesc and RAMUsageDesc. Note +// NewClusterManager creates the two metric vectors OOMCount and RAMUsage. Note // that the zone is set as a ConstLabel. (It's different in each instance of the -// ClusterManager, but constant over the lifetime of an instance.) Then there is -// a variable label "host", since we want to partition the collected metrics by -// host. Since all Descs created in this way are consistent across instances, -// with a guaranteed distinction by the "zone" label, we can register different -// ClusterManager instances with the same registry. +// ClusterManager, but constant over the lifetime of an instance.) The reported +// values are partitioned by host, which is therefore a variable label. func NewClusterManager(zone string) *ClusterManager { return &ClusterManager{ Zone: zone, - OOMCountDesc: prometheus.NewDesc( - "clustermanager_oom_crashes_total", - "Number of OOM crashes.", + OOMCount: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: "clustermanager", + Name: "oom_count", + Help: "number of OOM crashes", + ConstLabels: prometheus.Labels{"zone": zone}, + }, []string{"host"}, - prometheus.Labels{"zone": zone}, ), - RAMUsageDesc: prometheus.NewDesc( - "clustermanager_ram_usage_bytes", - "RAM usage as reported to the cluster manager.", + RAMUsage: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Subsystem: "clustermanager", + Name: "ram_usage_bytes", + Help: "RAM usage as reported to the cluster manager", + ConstLabels: prometheus.Labels{"zone": zone}, + }, []string{"host"}, - prometheus.Labels{"zone": zone}, ), } } -func ExampleCollector() { +func ExampleCollector_clustermanager() { workerDB := NewClusterManager("db") workerCA := NewClusterManager("ca") + prometheus.MustRegister(workerDB) + prometheus.MustRegister(workerCA) // Since we are dealing with custom Collector implementations, it might - // be a good idea to try it out with a pedantic registry. - reg := prometheus.NewPedanticRegistry() - reg.MustRegister(workerDB) - reg.MustRegister(workerCA) + // be a good idea to enable the collect checks in the registry. 
+ prometheus.EnableCollectChecks(true) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/example_memstats_test.go b/vendor/github.com/prometheus/client_golang/prometheus/example_memstats_test.go new file mode 100644 index 00000000000..a84d072504e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/example_memstats_test.go @@ -0,0 +1,87 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "runtime" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + allocDesc = prometheus.NewDesc( + prometheus.BuildFQName("", "memstats", "alloc_bytes"), + "bytes allocated and still in use", + nil, nil, + ) + totalAllocDesc = prometheus.NewDesc( + prometheus.BuildFQName("", "memstats", "total_alloc_bytes"), + "bytes allocated (even if freed)", + nil, nil, + ) + numGCDesc = prometheus.NewDesc( + prometheus.BuildFQName("", "memstats", "num_gc_total"), + "number of GCs run", + nil, nil, + ) +) + +// MemStatsCollector is an example for a custom Collector that solves the +// problem of feeding into multiple metrics at the same time. The +// runtime.ReadMemStats should happen only once, and then the results need to be +// fed into a number of separate Metrics. In this example, only a few of the +// values reported by ReadMemStats are used. For each, there is a Desc provided +// as a var, so the MemStatsCollector itself needs nothing else in the +// struct. Only the methods need to be implemented. +type MemStatsCollector struct{} + +// Describe just sends the three Desc objects for the Metrics we intend to +// collect. +func (_ MemStatsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- allocDesc + ch <- totalAllocDesc + ch <- numGCDesc +} + +// Collect does the trick by calling ReadMemStats once and then constructing +// three different Metrics on the fly. +func (_ MemStatsCollector) Collect(ch chan<- prometheus.Metric) { + var ms runtime.MemStats + runtime.ReadMemStats(&ms) + ch <- prometheus.MustNewConstMetric( + allocDesc, + prometheus.GaugeValue, + float64(ms.Alloc), + ) + ch <- prometheus.MustNewConstMetric( + totalAllocDesc, + prometheus.GaugeValue, + float64(ms.TotalAlloc), + ) + ch <- prometheus.MustNewConstMetric( + numGCDesc, + prometheus.CounterValue, + float64(ms.NumGC), + ) + // To avoid new allocations on each collection, you could also keep + // metric objects around and return the same objects each time, just + // with new values set. +} + +func ExampleCollector_memstats() { + prometheus.MustRegister(&MemStatsCollector{}) + // Since we are dealing with custom Collector implementations, it might + // be a good idea to enable the collect checks in the registry. 
+ prometheus.EnableCollectChecks(true) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/example_selfcollector_test.go b/vendor/github.com/prometheus/client_golang/prometheus/example_selfcollector_test.go new file mode 100644 index 00000000000..608deeb0279 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/example_selfcollector_test.go @@ -0,0 +1,69 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "runtime" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +func NewCallbackMetric(desc *prometheus.Desc, callback func() float64) *CallbackMetric { + result := &CallbackMetric{desc: desc, callback: callback} + result.Init(result) // Initialize the SelfCollector. + return result +} + +// TODO: Come up with a better example. + +// CallbackMetric is an example for a user-defined Metric that exports the +// result of a function call as a metric of type "untyped" without any +// labels. It uses SelfCollector to turn the Metric into a Collector so that it +// can be registered with Prometheus. +// +// Note that this example is pretty much academic as the prometheus package +// already provides an UntypedFunc type. +type CallbackMetric struct { + prometheus.SelfCollector + + desc *prometheus.Desc + callback func() float64 +} + +func (cm *CallbackMetric) Desc() *prometheus.Desc { + return cm.desc +} + +func (cm *CallbackMetric) Write(m *dto.Metric) error { + m.Untyped = &dto.Untyped{Value: proto.Float64(cm.callback())} + return nil +} + +func ExampleSelfCollector() { + m := NewCallbackMetric( + prometheus.NewDesc( + "runtime_goroutines_count", + "Total number of goroutines that currently exist.", + nil, nil, // No labels, these must be nil. + ), + func() float64 { + return float64(runtime.NumGoroutine()) + }, + ) + prometheus.MustRegister(m) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/example_timer_complex_test.go b/vendor/github.com/prometheus/client_golang/prometheus/example_timer_complex_test.go deleted file mode 100644 index c5e7de5e5e5..00000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/example_timer_complex_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus_test - -import ( - "net/http" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - // apiRequestDuration tracks the duration separate for each HTTP status - // class (1xx, 2xx, ...). This creates a fair amount of time series on - // the Prometheus server. Usually, you would track the duration of - // serving HTTP request without partitioning by outcome. Do something - // like this only if needed. Also note how only status classes are - // tracked, not every single status code. The latter would create an - // even larger amount of time series. Request counters partitioned by - // status code are usually OK as each counter only creates one time - // series. Histograms are way more expensive, so partition with care and - // only where you really need separate latency tracking. Partitioning by - // status class is only an example. In concrete cases, other partitions - // might make more sense. - apiRequestDuration = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "api_request_duration_seconds", - Help: "Histogram for the request duration of the public API, partitioned by status class.", - Buckets: prometheus.ExponentialBuckets(0.1, 1.5, 5), - }, - []string{"status_class"}, - ) -) - -func handler(w http.ResponseWriter, r *http.Request) { - status := http.StatusOK - // The ObserverFunc gets called by the deferred ObserveDuration and - // decides which Histogram's Observe method is called. - timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) { - switch { - case status >= 500: // Server error. - apiRequestDuration.WithLabelValues("5xx").Observe(v) - case status >= 400: // Client error. - apiRequestDuration.WithLabelValues("4xx").Observe(v) - case status >= 300: // Redirection. - apiRequestDuration.WithLabelValues("3xx").Observe(v) - case status >= 200: // Success. - apiRequestDuration.WithLabelValues("2xx").Observe(v) - default: // Informational. - apiRequestDuration.WithLabelValues("1xx").Observe(v) - } - })) - defer timer.ObserveDuration() - - // Handle the request. Set status accordingly. - // ... -} - -func ExampleTimer_complex() { - http.HandleFunc("/api", handler) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/example_timer_gauge_test.go b/vendor/github.com/prometheus/client_golang/prometheus/example_timer_gauge_test.go deleted file mode 100644 index 7184a0d1d9e..00000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/example_timer_gauge_test.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus_test - -import ( - "os" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - // If a function is called rarely (i.e. not more often than scrapes - // happen) or ideally only once (like in a batch job), it can make sense - // to use a Gauge for timing the function call. 
For timing a batch job - // and pushing the result to a Pushgateway, see also the comprehensive - // example in the push package. - funcDuration = prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "example_function_duration_seconds", - Help: "Duration of the last call of an example function.", - }) -) - -func run() error { - // The Set method of the Gauge is used to observe the duration. - timer := prometheus.NewTimer(prometheus.ObserverFunc(funcDuration.Set)) - defer timer.ObserveDuration() - - // Do something. Return errors as encountered. The use of 'defer' above - // makes sure the function is still timed properly. - return nil -} - -func ExampleTimer_gauge() { - if err := run(); err != nil { - os.Exit(1) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/example_timer_test.go b/vendor/github.com/prometheus/client_golang/prometheus/example_timer_test.go deleted file mode 100644 index bd86bb47201..00000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/example_timer_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus_test - -import ( - "math/rand" - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{ - Name: "example_request_duration_seconds", - Help: "Histogram for the runtime of a simple example function.", - Buckets: prometheus.LinearBuckets(0.01, 0.01, 10), - }) -) - -func ExampleTimer() { - // timer times this example function. It uses a Histogram, but a Summary - // would also work, as both implement Observer. Check out - // https://prometheus.io/docs/practices/histograms/ for differences. - timer := prometheus.NewTimer(requestDuration) - defer timer.ObserveDuration() - - // Do something here that takes time. 
- time.Sleep(time.Duration(rand.NormFloat64()*10000+50000) * time.Microsecond) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go b/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go index 45f60650f32..0344e465b0c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go @@ -14,16 +14,16 @@ package prometheus_test import ( - "bytes" + "flag" "fmt" "math" "net/http" + "os" "runtime" "sort" - "strings" + "time" dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/expfmt" "github.com/golang/protobuf/proto" @@ -48,12 +48,16 @@ func ExampleGauge() { } func ExampleGaugeVec() { + binaryVersion := flag.String("binary_version", "debug", "Version of the binary: debug, canary, production.") + flag.Parse() + opsQueued := prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: "our_company", - Subsystem: "blob_storage", - Name: "ops_queued", - Help: "Number of blob storage operations waiting to be processed, partitioned by user and type.", + Namespace: "our_company", + Subsystem: "blob_storage", + Name: "ops_queued", + Help: "Number of blob storage operations waiting to be processed, partitioned by user and type.", + ConstLabels: prometheus.Labels{"binary_version": *binaryVersion}, }, []string{ // Which user has requested the operation? @@ -113,7 +117,7 @@ func ExampleCounter() { pushComplete := make(chan struct{}) // TODO: Start a goroutine that performs repository pushes and reports // each completion via the channel. - for range pushComplete { + for _ = range pushComplete { pushCounter.Inc() } // Output: @@ -121,10 +125,14 @@ func ExampleCounter() { } func ExampleCounterVec() { + binaryVersion := flag.String("environment", "test", "Execution environment: test, staging, production.") + flag.Parse() + httpReqs := prometheus.NewCounterVec( prometheus.CounterOpts{ - Name: "http_requests_total", - Help: "How many HTTP requests processed, partitioned by status code and HTTP method.", + Name: "http_requests_total", + Help: "How many HTTP requests processed, partitioned by status code and HTTP method.", + ConstLabels: prometheus.Labels{"env": *binaryVersion}, }, []string{"code", "method"}, ) @@ -169,8 +177,8 @@ func ExampleInstrumentHandler() { func ExampleLabelPairSorter() { labelPairs := []*dto.LabelPair{ - {Name: proto.String("status"), Value: proto.String("404")}, - {Name: proto.String("method"), Value: proto.String("get")}, + &dto.LabelPair{Name: proto.String("status"), Value: proto.String("404")}, + &dto.LabelPair{Name: proto.String("method"), Value: proto.String("get")}, } sort.Sort(prometheus.LabelPairSorter(labelPairs)) @@ -334,9 +342,8 @@ func ExampleRegister() { func ExampleSummary() { temps := prometheus.NewSummary(prometheus.SummaryOpts{ - Name: "pond_temperature_celsius", - Help: "The temperature of the frog pond.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. }) // Simulate some observations. 
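The hunks above drop the explicit Objectives field from SummaryOpts: the examples in the restored code rely on the library's default objectives, while the newer examples being removed set them explicitly. Roughly, the newer form looks like the following sketch; the observation loop and values are illustrative only:

```go
package main

import (
	"math/rand"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	temps := prometheus.NewSummary(prometheus.SummaryOpts{
		Name:       "pond_temperature_celsius",
		Help:       "The temperature of the frog pond.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	})
	prometheus.MustRegister(temps)

	// Simulate some observations; the summary then exports quantile
	// estimates for 0.5, 0.9 and 0.99 with the configured absolute errors.
	for i := 0; i < 1000; i++ {
		temps.Observe(30 + rand.Float64()*10)
	}
}
```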
@@ -373,9 +380,8 @@ func ExampleSummary() { func ExampleSummaryVec() { temps := prometheus.NewSummaryVec( prometheus.SummaryOpts{ - Name: "pond_temperature_celsius", - Help: "The temperature of the frog pond.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. }, []string{"species"}, ) @@ -390,90 +396,89 @@ func ExampleSummaryVec() { temps.WithLabelValues("leiopelma-hochstetteri") // Just for demonstration, let's check the state of the summary vector - // by registering it with a custom registry and then let it collect the - // metrics. - reg := prometheus.NewRegistry() - reg.MustRegister(temps) - - metricFamilies, err := reg.Gather() - if err != nil || len(metricFamilies) != 1 { - panic("unexpected behavior of custom test registry") + // by (ab)using its Collect method and the Write method of its elements + // (which is usually only used by Prometheus internally - code like the + // following will never appear in your own code). + metricChan := make(chan prometheus.Metric) + go func() { + defer close(metricChan) + temps.Collect(metricChan) + }() + + metricStrings := []string{} + for metric := range metricChan { + dtoMetric := &dto.Metric{} + metric.Write(dtoMetric) + metricStrings = append(metricStrings, proto.MarshalTextString(dtoMetric)) } - fmt.Println(proto.MarshalTextString(metricFamilies[0])) + sort.Strings(metricStrings) // For reproducible print order. + fmt.Println(metricStrings) // Output: - // name: "pond_temperature_celsius" - // help: "The temperature of the frog pond." - // type: SUMMARY - // metric: < - // label: < - // name: "species" - // value: "leiopelma-hochstetteri" + // [label: < + // name: "species" + // value: "leiopelma-hochstetteri" + // > + // summary: < + // sample_count: 0 + // sample_sum: 0 + // quantile: < + // quantile: 0.5 + // value: nan + // > + // quantile: < + // quantile: 0.9 + // value: nan // > - // summary: < - // sample_count: 0 - // sample_sum: 0 - // quantile: < - // quantile: 0.5 - // value: nan - // > - // quantile: < - // quantile: 0.9 - // value: nan - // > - // quantile: < - // quantile: 0.99 - // value: nan - // > + // quantile: < + // quantile: 0.99 + // value: nan // > // > - // metric: < - // label: < - // name: "species" - // value: "lithobates-catesbeianus" + // label: < + // name: "species" + // value: "lithobates-catesbeianus" + // > + // summary: < + // sample_count: 1000 + // sample_sum: 31956.100000000017 + // quantile: < + // quantile: 0.5 + // value: 32.4 // > - // summary: < - // sample_count: 1000 - // sample_sum: 31956.100000000017 - // quantile: < - // quantile: 0.5 - // value: 32.4 - // > - // quantile: < - // quantile: 0.9 - // value: 41.4 - // > - // quantile: < - // quantile: 0.99 - // value: 41.9 - // > + // quantile: < + // quantile: 0.9 + // value: 41.4 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 // > // > - // metric: < - // label: < - // name: "species" - // value: "litoria-caerulea" + // label: < + // name: "species" + // value: "litoria-caerulea" + // > + // summary: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // quantile: < + // quantile: 0.5 + // value: 31.1 // > - // summary: < - // sample_count: 1000 - // sample_sum: 29969.50000000001 - // quantile: < - // quantile: 0.5 - // value: 31.1 - // > - // quantile: < - // quantile: 0.9 - // value: 41.3 - // > - // quantile: < - // quantile: 0.99 - // value: 41.9 - // > + // quantile: 
< + // quantile: 0.9 + // value: 41.3 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 // > // > + // ] } -func ExampleNewConstSummary() { +func ExampleConstSummary() { desc := prometheus.NewDesc( "http_request_duration_seconds", "A summary of the HTTP request durations.", @@ -569,7 +574,7 @@ func ExampleHistogram() { // > } -func ExampleNewConstHistogram() { +func ExampleConstHistogram() { desc := prometheus.NewDesc( "http_request_duration_seconds", "A histogram of the HTTP request durations.", @@ -627,128 +632,18 @@ func ExampleNewConstHistogram() { // > } -func ExampleAlreadyRegisteredError() { - reqCounter := prometheus.NewCounter(prometheus.CounterOpts{ - Name: "requests_total", - Help: "The total number of requests served.", +func ExamplePushCollectors() { + hostname, _ := os.Hostname() + completionTime := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "db_backup_last_completion_time", + Help: "The timestamp of the last succesful completion of a DB backup.", }) - if err := prometheus.Register(reqCounter); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { - // A counter for that metric has been registered before. - // Use the old counter from now on. - reqCounter = are.ExistingCollector.(prometheus.Counter) - } else { - // Something else went wrong! - panic(err) - } - } - reqCounter.Inc() -} - -func ExampleGatherers() { - reg := prometheus.NewRegistry() - temp := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Name: "temperature_kelvin", - Help: "Temperature in Kelvin.", - }, - []string{"location"}, - ) - reg.MustRegister(temp) - temp.WithLabelValues("outside").Set(273.14) - temp.WithLabelValues("inside").Set(298.44) - - var parser expfmt.TextParser - - text := ` -# TYPE humidity_percent gauge -# HELP humidity_percent Humidity in %. -humidity_percent{location="outside"} 45.4 -humidity_percent{location="inside"} 33.2 -# TYPE temperature_kelvin gauge -# HELP temperature_kelvin Temperature in Kelvin. -temperature_kelvin{location="somewhere else"} 4.5 -` - - parseText := func() ([]*dto.MetricFamily, error) { - parsed, err := parser.TextToMetricFamilies(strings.NewReader(text)) - if err != nil { - return nil, err - } - var result []*dto.MetricFamily - for _, mf := range parsed { - result = append(result, mf) - } - return result, nil + completionTime.Set(float64(time.Now().Unix())) + if err := prometheus.PushCollectors( + "db_backup", hostname, + "http://pushgateway:9091", + completionTime, + ); err != nil { + fmt.Println("Could not push completion time to Pushgateway:", err) } - - gatherers := prometheus.Gatherers{ - reg, - prometheus.GathererFunc(parseText), - } - - gathering, err := gatherers.Gather() - if err != nil { - fmt.Println(err) - } - - out := &bytes.Buffer{} - for _, mf := range gathering { - if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { - panic(err) - } - } - fmt.Print(out.String()) - fmt.Println("----------") - - // Note how the temperature_kelvin metric family has been merged from - // different sources. Now try - text = ` -# TYPE humidity_percent gauge -# HELP humidity_percent Humidity in %. -humidity_percent{location="outside"} 45.4 -humidity_percent{location="inside"} 33.2 -# TYPE temperature_kelvin gauge -# HELP temperature_kelvin Temperature in Kelvin. 
-# Duplicate metric: -temperature_kelvin{location="outside"} 265.3 - # Wrong labels: -temperature_kelvin 4.5 -` - - gathering, err = gatherers.Gather() - if err != nil { - fmt.Println(err) - } - // Note that still as many metrics as possible are returned: - out.Reset() - for _, mf := range gathering { - if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { - panic(err) - } - } - fmt.Print(out.String()) - - // Output: - // # HELP humidity_percent Humidity in %. - // # TYPE humidity_percent gauge - // humidity_percent{location="inside"} 33.2 - // humidity_percent{location="outside"} 45.4 - // # HELP temperature_kelvin Temperature in Kelvin. - // # TYPE temperature_kelvin gauge - // temperature_kelvin{location="inside"} 298.44 - // temperature_kelvin{location="outside"} 273.14 - // temperature_kelvin{location="somewhere else"} 4.5 - // ---------- - // 2 error(s) occurred: - // * collected metric temperature_kelvin label: gauge: was collected before with the same name and label values - // * collected metric temperature_kelvin gauge: has label dimensions inconsistent with previously collected metrics in the same metric family - // # HELP humidity_percent Humidity in %. - // # TYPE humidity_percent gauge - // humidity_percent{location="inside"} 33.2 - // humidity_percent{location="outside"} 45.4 - // # HELP temperature_kelvin Temperature in Kelvin. - // # TYPE temperature_kelvin gauge - // temperature_kelvin{location="inside"} 298.44 - // temperature_kelvin{location="outside"} 273.14 } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar.go similarity index 81% rename from vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go rename to vendor/github.com/prometheus/client_golang/prometheus/expvar.go index 18a99d5faaa..0f7630d53f5 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar.go @@ -18,21 +18,21 @@ import ( "expvar" ) -type expvarCollector struct { +// ExpvarCollector collects metrics from the expvar interface. It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the ExpvarCollector is inherently +// slow. Thus, the ExpvarCollector is probably great for experiments and +// prototying, but you should seriously consider a more direct implementation of +// Prometheus metrics for monitoring production systems. +// +// Use NewExpvarCollector to create new instances. +type ExpvarCollector struct { exports map[string]*Desc } -// NewExpvarCollector returns a newly allocated expvar Collector that still has -// to be registered with a Prometheus registry. -// -// An expvar Collector collects metrics from the expvar interface. It provides a -// quick way to expose numeric values that are already exported via expvar as -// Prometheus metrics. Note that the data models of expvar and Prometheus are -// fundamentally different, and that the expvar Collector is inherently slower -// than native Prometheus metrics. Thus, the expvar Collector is probably great -// for experiments and prototying, but you should seriously consider a more -// direct implementation of Prometheus metrics for monitoring production -// systems. 
+// NewExpvarCollector returns a newly allocated ExpvarCollector that still has +// to be registered with the Prometheus registry. // // The exports map has the following meaning: // @@ -59,21 +59,21 @@ type expvarCollector struct { // sample values. // // Anything that does not fit into the scheme above is silently ignored. -func NewExpvarCollector(exports map[string]*Desc) Collector { - return &expvarCollector{ +func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector { + return &ExpvarCollector{ exports: exports, } } // Describe implements Collector. -func (e *expvarCollector) Describe(ch chan<- *Desc) { +func (e *ExpvarCollector) Describe(ch chan<- *Desc) { for _, desc := range e.exports { ch <- desc } } // Collect implements Collector. -func (e *expvarCollector) Collect(ch chan<- Metric) { +func (e *ExpvarCollector) Collect(ch chan<- Metric) { for name, desc := range e.exports { var m Metric expVar := expvar.Get(name) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_test.go similarity index 98% rename from vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go rename to vendor/github.com/prometheus/client_golang/prometheus/expvar_test.go index 910dac32584..5d3128faed6 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_test.go @@ -24,7 +24,7 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -func ExampleNewExpvarCollector() { +func ExampleExpvarCollector() { expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{ "memstats": prometheus.NewDesc( "expvar_memstats", diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go deleted file mode 100644 index e3b67df8ac0..00000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go +++ /dev/null @@ -1,29 +0,0 @@ -package prometheus - -// Inline and byte-free variant of hash/fnv's fnv64a. - -const ( - offset64 = 14695981039346656037 - prime64 = 1099511628211 -) - -// hashNew initializies a new fnv64a hash value. -func hashNew() uint64 { - return offset64 -} - -// hashAdd adds a string to a fnv64a hash value, returning the updated hash. -func hashAdd(h uint64, s string) uint64 { - for i := 0; i < len(s); i++ { - h ^= uint64(s[i]) - h *= prime64 - } - return h -} - -// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. -func hashAddByte(h uint64, b byte) uint64 { - h ^= uint64(b) - h *= prime64 - return h -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index 13064dab899..ba8a402cafc 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -13,6 +13,8 @@ package prometheus +import "hash/fnv" + // Gauge is a Metric that represents a single numerical value that can // arbitrarily go up and down. // @@ -27,21 +29,16 @@ type Gauge interface { // Set sets the Gauge to an arbitrary value. Set(float64) - // Inc increments the Gauge by 1. Use Add to increment it by arbitrary - // values. + // Inc increments the Gauge by 1. Inc() - // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary - // values. + // Dec decrements the Gauge by 1. 
Dec() - // Add adds the given value to the Gauge. (The value can be negative, - // resulting in a decrease of the Gauge.) + // Add adds the given value to the Gauge. (The value can be + // negative, resulting in a decrease of the Gauge.) Add(float64) // Sub subtracts the given value from the Gauge. (The value can be // negative, resulting in an increase of the Gauge.) Sub(float64) - - // SetToCurrentTime sets the Gauge to the current Unix time in seconds. - SetToCurrentTime() } // GaugeOpts is an alias for Opts. See there for doc comments. @@ -63,11 +60,12 @@ func NewGauge(opts GaugeOpts) Gauge { // (e.g. number of operations queued, partitioned by user and operation // type). Create instances with NewGaugeVec. type GaugeVec struct { - *metricVec + MetricVec } // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and -// partitioned by the given label names. +// partitioned by the given label names. At least one label name must be +// provided. func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -76,57 +74,33 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { opts.ConstLabels, ) return &GaugeVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newValue(desc, GaugeValue, 0, lvs...) - }), + MetricVec: MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + hash: fnv.New64a(), + newMetric: func(lvs ...string) Metric { + return newValue(desc, GaugeValue, 0, lvs...) + }, + }, } } -// GetMetricWithLabelValues returns the Gauge for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Gauge is created. -// -// It is possible to call this method without using the returned Gauge to only -// create the new Gauge but leave it at its starting value 0. See also the -// SummaryVec example. -// -// Keeping the Gauge for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Gauge from the GaugeVec. In that case, the -// Gauge will still exist, but it will not be exported anymore, even if a -// Gauge with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Gauge and not a +// Metric so that no type conversion is required. func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { - metric, err := m.metricVec.getMetricWithLabelValues(lvs...) + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { return metric.(Gauge), err } return nil, err } -// GetMetricWith returns the Gauge for the given Labels map (the label names -// must match those of the VariableLabels in Desc). 
If that label map is -// accessed for the first time, a new Gauge is created. Implications of -// creating a Gauge without using it and keeping the Gauge for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc. -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Gauge and not a Metric so that no +// type conversion is required. func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { - metric, err := m.metricVec.getMetricWith(labels) + metric, err := m.MetricVec.GetMetricWith(labels) if metric != nil { return metric.(Gauge), err } @@ -138,14 +112,14 @@ func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { // error, WithLabelValues allows shortcuts like // myVec.WithLabelValues("404", "GET").Add(42) func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge { - return m.metricVec.withLabelValues(lvs...).(Gauge) + return m.MetricVec.WithLabelValues(lvs...).(Gauge) } // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. By not returning an error, With allows shortcuts like // myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) func (m *GaugeVec) With(labels Labels) Gauge { - return m.metricVec.with(labels).(Gauge) + return m.MetricVec.With(labels).(Gauge) } // GaugeFunc is a Gauge whose value is determined at collect time by calling a diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go index 8e5f002c90f..48cab463671 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go @@ -19,7 +19,6 @@ import ( "sync" "testing" "testing/quick" - "time" dto "github.com/prometheus/client_model/go" ) @@ -181,22 +180,3 @@ func TestGaugeFunc(t *testing.T) { t.Errorf("expected %q, got %q", expected, got) } } - -func TestGaugeSetCurrentTime(t *testing.T) { - g := NewGauge(GaugeOpts{ - Name: "test_name", - Help: "test help", - }) - g.SetToCurrentTime() - unixTime := float64(time.Now().Unix()) - - m := &dto.Metric{} - g.Write(m) - - delta := unixTime - m.GetGauge().GetValue() - // This is just a smoke test to make sure SetToCurrentTime is not - // totally off. Tests with current time involved are hard... - if math.Abs(delta) > 5 { - t.Errorf("Gauge set to current time deviates from current time by more than 5s, delta is %f seconds", delta) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index 096454af91b..8be24769518 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -8,10 +8,8 @@ import ( ) type goCollector struct { - goroutinesDesc *Desc - threadsDesc *Desc - gcDesc *Desc - goInfoDesc *Desc + goroutines Gauge + gcDesc *Desc // metrics to describe and collect metrics memStatsMetrics @@ -19,24 +17,17 @@ type goCollector struct { // NewGoCollector returns a collector which exports metrics about the current // go process. 
-func NewGoCollector() Collector { +func NewGoCollector() *goCollector { return &goCollector{ - goroutinesDesc: NewDesc( - "go_goroutines", - "Number of goroutines that currently exist.", - nil, nil), - threadsDesc: NewDesc( - "go_threads", - "Number of OS threads created.", - nil, nil), + goroutines: NewGauge(GaugeOpts{ + Namespace: "go", + Name: "goroutines", + Help: "Number of goroutines that currently exist.", + }), gcDesc: NewDesc( "go_gc_duration_seconds", "A summary of the GC invocation durations.", nil, nil), - goInfoDesc: NewDesc( - "go_info", - "Information about the Go environment.", - nil, Labels{"version": runtime.Version()}), metrics: memStatsMetrics{ { desc: NewDesc( @@ -57,7 +48,7 @@ func NewGoCollector() Collector { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", + "Number of bytes obtained by system. Sum of all system allocations.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, @@ -120,12 +111,12 @@ func NewGoCollector() Collector { valType: GaugeValue, }, { desc: NewDesc( - memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", + memstatNamespace("heap_released_bytes_total"), + "Total number of heap bytes released to OS.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, - valType: GaugeValue, + valType: CounterValue, }, { desc: NewDesc( memstatNamespace("heap_objects"), @@ -220,15 +211,7 @@ func NewGoCollector() Collector { "Number of seconds since 1970 of last garbage collection.", nil, nil, ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_cpu_fraction"), - "The fraction of this program's available CPU time used by the GC since the program started.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC*10 ^ 9) }, valType: GaugeValue, }, }, @@ -241,10 +224,9 @@ func memstatNamespace(s string) string { // Describe returns all descriptions of the collector. func (c *goCollector) Describe(ch chan<- *Desc) { - ch <- c.goroutinesDesc - ch <- c.threadsDesc + ch <- c.goroutines.Desc() ch <- c.gcDesc - ch <- c.goInfoDesc + for _, i := range c.metrics { ch <- i.desc } @@ -252,9 +234,8 @@ func (c *goCollector) Describe(ch chan<- *Desc) { // Collect returns the current state of all metrics of the collector. 
func (c *goCollector) Collect(ch chan<- Metric) { - ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) - n, _ := runtime.ThreadCreateProfile(nil) - ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) + c.goroutines.Set(float64(runtime.NumGoroutine())) + ch <- c.goroutines var stats debug.GCStats stats.PauseQuantiles = make([]time.Duration, 5) @@ -267,8 +248,6 @@ func (c *goCollector) Collect(ch chan<- Metric) { quantiles[0.0] = stats.PauseQuantiles[0].Seconds() ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) - ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) - ms := &runtime.MemStats{} runtime.ReadMemStats(ms) for _, i := range c.metrics { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go index 2d05118f331..9a8858cbd2b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go @@ -29,37 +29,33 @@ func TestGoCollector(t *testing.T) { for { select { - case m := <-ch: - // m can be Gauge or Counter, - // currently just test the go_goroutines Gauge - // and ignore others. - if m.Desc().fqName != "go_goroutines" { - continue - } - pb := &dto.Metric{} - m.Write(pb) - if pb.GetGauge() == nil { - continue - } + case metric := <-ch: + switch m := metric.(type) { + // Attention, this also catches Counter... + case Gauge: + pb := &dto.Metric{} + m.Write(pb) + if pb.GetGauge() == nil { + continue + } - if old == -1 { - old = int(pb.GetGauge().GetValue()) - close(waitc) - continue - } + if old == -1 { + old = int(pb.GetGauge().GetValue()) + close(waitc) + continue + } - if diff := int(pb.GetGauge().GetValue()) - old; diff != 1 { - // TODO: This is flaky in highly concurrent situations. - t.Errorf("want 1 new goroutine, got %d", diff) - } + if diff := int(pb.GetGauge().GetValue()) - old; diff != 1 { + // TODO: This is flaky in highly concurrent situations. + t.Errorf("want 1 new goroutine, got %d", diff) + } - // GoCollector performs three sends per call. - // On line 27 we need to receive three more sends - // to shut down cleanly. - <-ch - <-ch - <-ch - return + // GoCollector performs two sends per call. + // On line 27 we need to receive the second send + // to shut down cleanly. + <-ch + return + } case <-time.After(1 * time.Second): t.Fatalf("expected collect timed out") } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 6cc6e68cf48..f98a41bc89d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -15,6 +15,7 @@ package prometheus import ( "fmt" + "hash/fnv" "math" "sort" "sync/atomic" @@ -51,11 +52,11 @@ type Histogram interface { // bucket of a histogram ("le" -> "less or equal"). const bucketLabel = "le" -// DefBuckets are the default Histogram buckets. The default buckets are -// tailored to broadly measure the response time (in seconds) of a network -// service. Most likely, however, you will be required to define buckets -// customized to your use case. var ( + // DefBuckets are the default Histogram buckets. The default buckets are + // tailored to broadly measure the response time (in seconds) of a + // network service. 
Most likely, however, you will be required to define + // buckets customized to your use case. DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} errBucketLabelNotAllowed = fmt.Errorf( @@ -210,7 +211,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr // Finally we know the final length of h.upperBounds and can make counts. h.counts = make([]uint64, len(h.upperBounds)) - h.init(h) // Init self-collection. + h.Init(h) // Init self-collection. return h } @@ -222,7 +223,7 @@ type histogram struct { sumBits uint64 count uint64 - selfCollector + SelfCollector // Note that there is no mutex required. desc *Desc @@ -287,11 +288,12 @@ func (h *histogram) Write(out *dto.Metric) error { // (e.g. HTTP request latencies, partitioned by status code and method). Create // instances with NewHistogramVec. type HistogramVec struct { - *metricVec + MetricVec } // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and -// partitioned by the given label names. +// partitioned by the given label names. At least one label name must be +// provided. func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -300,60 +302,35 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { opts.ConstLabels, ) return &HistogramVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newHistogram(desc, opts, lvs...) - }), + MetricVec: MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + hash: fnv.New64a(), + newMetric: func(lvs ...string) Metric { + return newHistogram(desc, opts, lvs...) + }, + }, } } -// GetMetricWithLabelValues returns the Histogram for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Histogram is created. -// -// It is possible to call this method without using the returned Histogram to only -// create the new Histogram but leave it at its starting value, a Histogram without -// any observations. -// -// Keeping the Histogram for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Histogram from the HistogramVec. In that case, the -// Histogram will still exist, but it will not be exported anymore, even if a -// Histogram with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - metric, err := m.metricVec.getMetricWithLabelValues(lvs...) +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Histogram and not a +// Metric so that no type conversion is required. 
+func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { - return metric.(Observer), err + return metric.(Histogram), err } return nil, err } -// GetMetricWith returns the Histogram for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Histogram is created. Implications of -// creating a Histogram without using it and keeping the Histogram for later use -// are the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc. -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (m *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { - metric, err := m.metricVec.getMetricWith(labels) +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Histogram and not a Metric so that no +// type conversion is required. +func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) { + metric, err := m.MetricVec.GetMetricWith(labels) if metric != nil { - return metric.(Observer), err + return metric.(Histogram), err } return nil, err } @@ -362,15 +339,15 @@ func (m *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { // GetMetricWithLabelValues would have returned an error. By not returning an // error, WithLabelValues allows shortcuts like // myVec.WithLabelValues("404", "GET").Observe(42.21) -func (m *HistogramVec) WithLabelValues(lvs ...string) Observer { - return m.metricVec.withLabelValues(lvs...).(Observer) +func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram { + return m.MetricVec.WithLabelValues(lvs...).(Histogram) } // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. 
By not returning an error, With allows shortcuts like // myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (m *HistogramVec) With(labels Labels) Observer { - return m.metricVec.with(labels).(Observer) +func (m *HistogramVec) With(labels Labels) Histogram { + return m.MetricVec.With(labels).(Histogram) } type constHistogram struct { @@ -430,8 +407,8 @@ func NewConstHistogram( buckets map[float64]uint64, labelValues ...string, ) (Metric, error) { - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality } return &constHistogram{ desc: desc, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go index 5a20f4b6b8b..11cf66b4fe2 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go @@ -119,28 +119,6 @@ func BenchmarkHistogramWrite8(b *testing.B) { benchmarkHistogramWrite(8, b) } -func TestHistogramNonMonotonicBuckets(t *testing.T) { - testCases := map[string][]float64{ - "not strictly monotonic": {1, 2, 2, 3}, - "not monotonic at all": {1, 2, 4, 3, 5}, - "have +Inf in the middle": {1, 2, math.Inf(+1), 3}, - } - for name, buckets := range testCases { - func() { - defer func() { - if r := recover(); r == nil { - t.Errorf("Buckets %v are %s but NewHistogram did not panic.", buckets, name) - } - }() - _ = NewHistogram(HistogramOpts{ - Name: "test_histogram", - Help: "helpless", - Buckets: buckets, - }) - }() - } -} - // Intentionally adding +Inf here to test if that case is handled correctly. // Also, getCumulativeCounts depends on it. var testBuckets = []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)} @@ -149,7 +127,7 @@ func TestHistogramConcurrency(t *testing.T) { if testing.Short() { t.Skip("Skipping test in short mode.") } - + rand.Seed(42) it := func(n uint32) bool { @@ -286,7 +264,7 @@ func TestHistogramVecConcurrency(t *testing.T) { for i := 0; i < vecLength; i++ { m := &dto.Metric{} s := his.WithLabelValues(string('A' + i)) - s.(Histogram).Write(m) + s.Write(m) if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want { t.Errorf("got %d buckets in protobuf, want %d", got, want) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go index bfee5c6eb37..eabe602468f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go @@ -15,115 +15,14 @@ package prometheus import ( "bufio" - "bytes" - "compress/gzip" - "fmt" "io" "net" "net/http" "strconv" "strings" - "sync" "time" - - "github.com/prometheus/common/expfmt" -) - -// TODO(beorn7): Remove this whole file. It is a partial mirror of -// promhttp/http.go (to avoid circular import chains) where everything HTTP -// related should live. The functions here are just for avoiding -// breakage. Everything is deprecated. 
- -const ( - contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" ) -var bufPool sync.Pool - -func getBuf() *bytes.Buffer { - buf := bufPool.Get() - if buf == nil { - return &bytes.Buffer{} - } - return buf.(*bytes.Buffer) -} - -func giveBuf(buf *bytes.Buffer) { - buf.Reset() - bufPool.Put(buf) -} - -// Handler returns an HTTP handler for the DefaultGatherer. It is -// already instrumented with InstrumentHandler (using "prometheus" as handler -// name). -// -// Deprecated: Please note the issues described in the doc comment of -// InstrumentHandler. You might want to consider using promhttp.Handler instead -// (which is not instrumented, but can be instrumented with the tooling provided -// in package promhttp). -func Handler() http.Handler { - return InstrumentHandler("prometheus", UninstrumentedHandler()) -} - -// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. -// -// Deprecated: Use promhttp.Handler instead. See there for further documentation. -func UninstrumentedHandler() http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - mfs, err := DefaultGatherer.Gather() - if err != nil { - http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - - contentType := expfmt.Negotiate(req.Header) - buf := getBuf() - defer giveBuf(buf) - writer, encoding := decorateWriter(req, buf) - enc := expfmt.NewEncoder(writer, contentType) - var lastErr error - for _, mf := range mfs { - if err := enc.Encode(mf); err != nil { - lastErr = err - http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - } - if closer, ok := writer.(io.Closer); ok { - closer.Close() - } - if lastErr != nil && buf.Len() == 0 { - http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError) - return - } - header := w.Header() - header.Set(contentTypeHeader, string(contentType)) - header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) - if encoding != "" { - header.Set(contentEncodingHeader, encoding) - } - w.Write(buf.Bytes()) - }) -} - -// decorateWriter wraps a writer to handle gzip compression if requested. It -// returns the decorated writer and the appropriate "Content-Encoding" header -// (which is empty if no compression is enabled). -func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { - header := request.Header.Get(acceptEncodingHeader) - parts := strings.Split(header, ",") - for _, part := range parts { - part := strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return gzip.NewWriter(writer), "gzip" - } - } - return writer, "" -} - var instLabels = []string{"method", "code"} type nower interface { @@ -158,52 +57,29 @@ func nowSeries(t ...time.Time) nower { // has a constant label named "handler" with the provided handlerName as // value. http_requests_total is a metric vector partitioned by HTTP method // (label name "method") and HTTP status code (label name "code"). -// -// Deprecated: InstrumentHandler has several issues. Use the tooling provided in -// package promhttp instead. The issues are the following: -// -// - It uses Summaries rather than Histograms. Summaries are not useful if -// aggregation across multiple instances is required. 
-// -// - It uses microseconds as unit, which is deprecated and should be replaced by -// seconds. -// -// - The size of the request is calculated in a separate goroutine. Since this -// calculator requires access to the request header, it creates a race with -// any writes to the header performed during request handling. -// httputil.ReverseProxy is a prominent example for a handler -// performing such writes. -// -// - It has additional issues with HTTP/2, cf. -// https://github.com/prometheus/client_golang/issues/272. func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) } // InstrumentHandlerFunc wraps the given function for instrumentation. It -// otherwise works in the same way as InstrumentHandler (and shares the same -// issues). -// -// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as -// InstrumentHandler is. Use the tooling provided in package promhttp instead. +// otherwise works in the same way as InstrumentHandler. func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { return InstrumentHandlerFuncWithOpts( SummaryOpts{ Subsystem: "http", ConstLabels: Labels{"handler": handlerName}, - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }, handlerFunc, ) } -// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same -// issues) but provides more flexibility (at the cost of a more complex call -// syntax). As InstrumentHandler, this function registers four metric -// collectors, but it uses the provided SummaryOpts to create them. However, the -// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced -// by "requests_total", "request_duration_microseconds", "request_size_bytes", -// and "response_size_bytes", respectively. "Help" is replaced by an appropriate +// InstrumentHandlerWithOpts works like InstrumentHandler but provides more +// flexibility (at the cost of a more complex call syntax). As +// InstrumentHandler, this function registers four metric collectors, but it +// uses the provided SummaryOpts to create them. However, the fields "Name" and +// "Help" in the SummaryOpts are ignored. "Name" is replaced by +// "requests_total", "request_duration_microseconds", "request_size_bytes", and +// "response_size_bytes", respectively. "Help" is replaced by an appropriate // help string. The names of the variable labels of the http_requests_total // CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). // @@ -222,20 +98,13 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri // cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, // and all its fields are set to the equally named fields in the provided // SummaryOpts. -// -// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as -// InstrumentHandler is. Use the tooling provided in package promhttp instead. func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) } -// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares -// the same issues) but provides more flexibility (at the cost of a more complex -// call syntax). See InstrumentHandlerWithOpts for details how the provided -// SummaryOpts are used. 
-// -// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons -// as InstrumentHandler is. Use the tooling provided in package promhttp instead. +// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides +// more flexibility (at the cost of a more complex call syntax). See +// InstrumentHandlerWithOpts for details how the provided SummaryOpts are used. func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { reqCnt := NewCounterVec( CounterOpts{ @@ -247,52 +116,34 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo }, instLabels, ) - if err := Register(reqCnt); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqCnt = are.ExistingCollector.(*CounterVec) - } else { - panic(err) - } - } opts.Name = "request_duration_microseconds" opts.Help = "The HTTP request latencies in microseconds." reqDur := NewSummary(opts) - if err := Register(reqDur); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqDur = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } opts.Name = "request_size_bytes" opts.Help = "The HTTP request sizes in bytes." reqSz := NewSummary(opts) - if err := Register(reqSz); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqSz = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } opts.Name = "response_size_bytes" opts.Help = "The HTTP response sizes in bytes." resSz := NewSummary(opts) - if err := Register(resSz); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - resSz = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } + + regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec) + regReqDur := MustRegisterOrGet(reqDur).(Summary) + regReqSz := MustRegisterOrGet(reqSz).(Summary) + regResSz := MustRegisterOrGet(resSz).(Summary) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { now := time.Now() delegate := &responseWriterDelegator{ResponseWriter: w} - out := computeApproximateRequestSize(r) + out := make(chan int) + urlLen := 0 + if r.URL != nil { + urlLen = len(r.URL.String()) + } + go computeApproximateRequestSize(r, out, urlLen) _, cn := w.(http.CloseNotifier) _, fl := w.(http.Flusher) @@ -310,44 +161,30 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo method := sanitizeMethod(r.Method) code := sanitizeCode(delegate.status) - reqCnt.WithLabelValues(method, code).Inc() - reqDur.Observe(elapsed) - resSz.Observe(float64(delegate.written)) - reqSz.Observe(float64(<-out)) + regReqCnt.WithLabelValues(method, code).Inc() + regReqDur.Observe(elapsed) + regResSz.Observe(float64(delegate.written)) + regReqSz.Observe(float64(<-out)) }) } -func computeApproximateRequestSize(r *http.Request) <-chan int { - // Get URL length in current go routine for avoiding a race condition. - // HandlerFunc that runs in parallel may modify the URL. - s := 0 - if r.URL != nil { - s += len(r.URL.String()) - } - - out := make(chan int, 1) - - go func() { - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } +func computeApproximateRequestSize(r *http.Request, out chan int, s int) { + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) } - s += len(r.Host) - - // N.B. 
r.Form and r.MultipartForm are assumed to be included in r.URL. + } + s += len(r.Host) - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - out <- s - close(out) - }() + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. - return out + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + out <- s } type responseWriterDelegator struct { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http_test.go b/vendor/github.com/prometheus/client_golang/prometheus/http_test.go index 7fd4077b9ad..ffe0418cf87 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/http_test.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/http_test.go @@ -44,10 +44,9 @@ func TestInstrumentHandler(t *testing.T) { opts := SummaryOpts{ Subsystem: "http", ConstLabels: Labels{"handler": "test-handler"}, - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, } - reqCnt := NewCounterVec( + reqCnt := MustRegisterOrGet(NewCounterVec( CounterOpts{ Namespace: opts.Namespace, Subsystem: opts.Subsystem, @@ -56,51 +55,19 @@ func TestInstrumentHandler(t *testing.T) { ConstLabels: opts.ConstLabels, }, instLabels, - ) - err := Register(reqCnt) - if err == nil { - t.Fatal("expected reqCnt to be registered already") - } - if are, ok := err.(AlreadyRegisteredError); ok { - reqCnt = are.ExistingCollector.(*CounterVec) - } else { - t.Fatal("unexpected registration error:", err) - } + )).(*CounterVec) opts.Name = "request_duration_microseconds" opts.Help = "The HTTP request latencies in microseconds." - reqDur := NewSummary(opts) - err = Register(reqDur) - if err == nil { - t.Fatal("expected reqDur to be registered already") - } - if are, ok := err.(AlreadyRegisteredError); ok { - reqDur = are.ExistingCollector.(Summary) - } else { - t.Fatal("unexpected registration error:", err) - } + reqDur := MustRegisterOrGet(NewSummary(opts)).(Summary) opts.Name = "request_size_bytes" opts.Help = "The HTTP request sizes in bytes." - reqSz := NewSummary(opts) - err = Register(reqSz) - if err == nil { - t.Fatal("expected reqSz to be registered already") - } - if _, ok := err.(AlreadyRegisteredError); !ok { - t.Fatal("unexpected registration error:", err) - } + MustRegisterOrGet(NewSummary(opts)) opts.Name = "response_size_bytes" opts.Help = "The HTTP response sizes in bytes." - resSz := NewSummary(opts) - err = Register(resSz) - if err == nil { - t.Fatal("expected resSz to be registered already") - } - if _, ok := err.(AlreadyRegisteredError); !ok { - t.Fatal("unexpected registration error:", err) - } + MustRegisterOrGet(NewSummary(opts)) reqCnt.Reset() diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go deleted file mode 100644 index 2502e37348f..00000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ /dev/null @@ -1,57 +0,0 @@ -package prometheus - -import ( - "errors" - "fmt" - "strings" - "unicode/utf8" - - "github.com/prometheus/common/model" -) - -// Labels represents a collection of label name -> value mappings. This type is -// commonly used with the With(Labels) and GetMetricWith(Labels) methods of -// metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -// -// The other use-case is the specification of constant label pairs in Opts or to -// create a Desc. -type Labels map[string]string - -// reservedLabelPrefix is a prefix which is not legal in user-supplied -// label names. 
-const reservedLabelPrefix = "__" - -var errInconsistentCardinality = errors.New("inconsistent label cardinality") - -func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { - if len(labels) != expectedNumberOfValues { - return errInconsistentCardinality - } - - for name, val := range labels { - if !utf8.ValidString(val) { - return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) - } - } - - return nil -} - -func validateLabelValues(vals []string, expectedNumberOfValues int) error { - if len(vals) != expectedNumberOfValues { - return errInconsistentCardinality - } - - for _, val := range vals { - if !utf8.ValidString(val) { - return fmt.Errorf("label value %q is not valid UTF-8", val) - } - } - - return nil -} - -func checkLabelName(l string) bool { - return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index d4063d98f41..86fd81c108b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -22,8 +22,10 @@ import ( const separatorByte byte = 255 // A Metric models a single sample value with its meta data being exported to -// Prometheus. Implementations of Metric in this package are Gauge, Counter, -// Histogram, Summary, and Untyped. +// Prometheus. Implementers of Metric in this package inclued Gauge, Counter, +// Untyped, and Summary. Users can implement their own Metric types, but that +// should be rarely needed. See the example for SelfCollector, which is also an +// example for a user-implemented Metric. type Metric interface { // Desc returns the descriptor for the Metric. This method idempotently // returns the same descriptor throughout the lifetime of the @@ -34,23 +36,21 @@ type Metric interface { // Write encodes the Metric into a "Metric" Protocol Buffer data // transmission object. // - // Metric implementations must observe concurrency safety as reads of - // this metric may occur at any time, and any blocking occurs at the - // expense of total performance of rendering all registered - // metrics. Ideally, Metric implementations should support concurrent - // readers. + // Implementers of custom Metric types must observe concurrency safety + // as reads of this metric may occur at any time, and any blocking + // occurs at the expense of total performance of rendering all + // registered metrics. Ideally Metric implementations should support + // concurrent readers. // - // While populating dto.Metric, it is the responsibility of the - // implementation to ensure validity of the Metric protobuf (like valid - // UTF-8 strings or syntactically valid metric and label names). It is - // recommended to sort labels lexicographically. (Implementers may find - // LabelPairSorter useful for that.) Callers of Write should still make - // sure of sorting if they depend on it. + // The Prometheus client library attempts to minimize memory allocations + // and will provide a pre-existing reset dto.Metric pointer. Prometheus + // may recycle the dto.Metric proto message, so Metric implementations + // should just populate the provided dto.Metric and then should not keep + // any reference to it. + // + // While populating dto.Metric, labels must be sorted lexicographically. + // (Implementers may find LabelPairSorter useful for that.) 
Write(*dto.Metric) error - // TODO(beorn7): The original rationale of passing in a pre-allocated - // dto.Metric protobuf to save allocations has disappeared. The - // signature of this method should be changed to "Write() (*dto.Metric, - // error)". } // Opts bundles the options for creating most Metric types. Each metric diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go deleted file mode 100644 index b0520e85e8b..00000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Observer is the interface that wraps the Observe method, which is used by -// Histogram and Summary to add observations. -type Observer interface { - Observe(float64) -} - -// The ObserverFunc type is an adapter to allow the use of ordinary -// functions as Observers. If f is a function with the appropriate -// signature, ObserverFunc(f) is an Observer that calls f. -// -// This adapter is usually used in connection with the Timer type, and there are -// two general use cases: -// -// The most common one is to use a Gauge as the Observer for a Timer. -// See the "Gauge" Timer example. -// -// The more advanced use case is to create a function that dynamically decides -// which Observer to use for observing the duration. See the "Complex" Timer -// example. -type ObserverFunc func(float64) - -// Observe calls f(value). It implements Observer. -func (f ObserverFunc) Observe(value float64) { - f(value) -} - -// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. -type ObserverVec interface { - GetMetricWith(Labels) (Observer, error) - GetMetricWithLabelValues(lvs ...string) (Observer, error) - With(Labels) Observer - WithLabelValues(...string) Observer - - Collector -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 94b2553e14a..d8cf0eda347 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -19,16 +19,16 @@ type processCollector struct { pid int collectFn func(chan<- Metric) pidFn func() (int, error) - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, rss *Desc - startTime *Desc + cpuTotal Counter + openFDs, maxFDs Gauge + vsize, rss Gauge + startTime Gauge } // NewProcessCollector returns a collector which exports the current state of // process metrics including cpu, memory and file descriptor usage as well as // the process start time for the given process id under the given namespace. 
-func NewProcessCollector(pid int, namespace string) Collector { +func NewProcessCollector(pid int, namespace string) *processCollector { return NewProcessCollectorPIDFn( func() (int, error) { return pid, nil }, namespace, @@ -43,46 +43,41 @@ func NewProcessCollector(pid int, namespace string) Collector { func NewProcessCollectorPIDFn( pidFn func() (int, error), namespace string, -) Collector { - ns := "" - if len(namespace) > 0 { - ns = namespace + "_" - } - +) *processCollector { c := processCollector{ pidFn: pidFn, collectFn: func(chan<- Metric) {}, - cpuTotal: NewDesc( - ns+"process_cpu_seconds_total", - "Total user and system CPU time spent in seconds.", - nil, nil, - ), - openFDs: NewDesc( - ns+"process_open_fds", - "Number of open file descriptors.", - nil, nil, - ), - maxFDs: NewDesc( - ns+"process_max_fds", - "Maximum number of open file descriptors.", - nil, nil, - ), - vsize: NewDesc( - ns+"process_virtual_memory_bytes", - "Virtual memory size in bytes.", - nil, nil, - ), - rss: NewDesc( - ns+"process_resident_memory_bytes", - "Resident memory size in bytes.", - nil, nil, - ), - startTime: NewDesc( - ns+"process_start_time_seconds", - "Start time of the process since unix epoch in seconds.", - nil, nil, - ), + cpuTotal: NewCounter(CounterOpts{ + Namespace: namespace, + Name: "process_cpu_seconds_total", + Help: "Total user and system CPU time spent in seconds.", + }), + openFDs: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_open_fds", + Help: "Number of open file descriptors.", + }), + maxFDs: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_max_fds", + Help: "Maximum number of open file descriptors.", + }), + vsize: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_virtual_memory_bytes", + Help: "Virtual memory size in bytes.", + }), + rss: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_resident_memory_bytes", + Help: "Resident memory size in bytes.", + }), + startTime: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_start_time_seconds", + Help: "Start time of the process since unix epoch in seconds.", + }), } // Set up process metric collection if supported by the runtime. @@ -95,12 +90,12 @@ func NewProcessCollectorPIDFn( // Describe returns all descriptions of the collector. func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.vsize - ch <- c.rss - ch <- c.startTime + ch <- c.cpuTotal.Desc() + ch <- c.openFDs.Desc() + ch <- c.maxFDs.Desc() + ch <- c.vsize.Desc() + ch <- c.rss.Desc() + ch <- c.startTime.Desc() } // Collect returns the current state of all metrics of the collector. 
@@ -122,19 +117,26 @@ func (c *processCollector) processCollect(ch chan<- Metric) { } if stat, err := p.NewStat(); err == nil { - ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) - ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + c.cpuTotal.Set(stat.CPUTime()) + ch <- c.cpuTotal + c.vsize.Set(float64(stat.VirtualMemory())) + ch <- c.vsize + c.rss.Set(float64(stat.ResidentMemory())) + ch <- c.rss + if startTime, err := stat.StartTime(); err == nil { - ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + c.startTime.Set(startTime) + ch <- c.startTime } } if fds, err := p.FileDescriptorsLen(); err == nil { - ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + c.openFDs.Set(float64(fds)) + ch <- c.openFDs } if limits, err := p.NewLimits(); err == nil { - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + c.maxFDs.Set(float64(limits.OpenFiles)) + ch <- c.maxFDs } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go index c7acb47fe9d..829715acd8f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go @@ -1,12 +1,13 @@ package prometheus import ( - "bytes" + "io/ioutil" + "net/http" + "net/http/httptest" "os" "regexp" "testing" - "github.com/prometheus/common/expfmt" "github.com/prometheus/procfs" ) @@ -15,44 +16,39 @@ func TestProcessCollector(t *testing.T) { t.Skipf("skipping TestProcessCollector, procfs not available: %s", err) } - registry := NewRegistry() - if err := registry.Register(NewProcessCollector(os.Getpid(), "")); err != nil { - t.Fatal(err) - } - if err := registry.Register(NewProcessCollectorPIDFn( - func() (int, error) { return os.Getpid(), nil }, "foobar"), - ); err != nil { - t.Fatal(err) - } + registry := newRegistry() + registry.Register(NewProcessCollector(os.Getpid(), "")) + registry.Register(NewProcessCollectorPIDFn( + func() (int, error) { return os.Getpid(), nil }, "foobar")) - mfs, err := registry.Gather() + s := httptest.NewServer(InstrumentHandler("prometheus", registry)) + defer s.Close() + r, err := http.Get(s.URL) if err != nil { t.Fatal(err) } - - var buf bytes.Buffer - for _, mf := range mfs { - if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil { - t.Fatal(err) - } + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) } for _, re := range []*regexp.Regexp{ - regexp.MustCompile("\nprocess_cpu_seconds_total [0-9]"), - regexp.MustCompile("\nprocess_max_fds [1-9]"), - regexp.MustCompile("\nprocess_open_fds [1-9]"), - regexp.MustCompile("\nprocess_virtual_memory_bytes [1-9]"), - regexp.MustCompile("\nprocess_resident_memory_bytes [1-9]"), - regexp.MustCompile("\nprocess_start_time_seconds [0-9.]{10,}"), - regexp.MustCompile("\nfoobar_process_cpu_seconds_total [0-9]"), - regexp.MustCompile("\nfoobar_process_max_fds [1-9]"), - regexp.MustCompile("\nfoobar_process_open_fds [1-9]"), - regexp.MustCompile("\nfoobar_process_virtual_memory_bytes [1-9]"), - regexp.MustCompile("\nfoobar_process_resident_memory_bytes [1-9]"), - regexp.MustCompile("\nfoobar_process_start_time_seconds [0-9.]{10,}"), + regexp.MustCompile("process_cpu_seconds_total [0-9]"), + regexp.MustCompile("process_max_fds [0-9]{2,}"), + 
regexp.MustCompile("process_open_fds [1-9]"), + regexp.MustCompile("process_virtual_memory_bytes [1-9]"), + regexp.MustCompile("process_resident_memory_bytes [1-9]"), + regexp.MustCompile("process_start_time_seconds [0-9.]{10,}"), + regexp.MustCompile("foobar_process_cpu_seconds_total [0-9]"), + regexp.MustCompile("foobar_process_max_fds [0-9]{2,}"), + regexp.MustCompile("foobar_process_open_fds [1-9]"), + regexp.MustCompile("foobar_process_virtual_memory_bytes [1-9]"), + regexp.MustCompile("foobar_process_resident_memory_bytes [1-9]"), + regexp.MustCompile("foobar_process_start_time_seconds [0-9.]{10,}"), } { - if !re.Match(buf.Bytes()) { - t.Errorf("want body to match %s\n%s", re, buf.String()) + if !re.Match(body) { + t.Errorf("want body to match %s\n%s", re, body) } } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push.go b/vendor/github.com/prometheus/client_golang/prometheus/push.go new file mode 100644 index 00000000000..1c33848a353 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/push.go @@ -0,0 +1,65 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package prometheus + +// Push triggers a metric collection by the default registry and pushes all +// collected metrics to the Pushgateway specified by addr. See the Pushgateway +// documentation for detailed implications of the job and instance +// parameter. instance can be left empty. You can use just host:port or ip:port +// as url, in which case 'http://' is added automatically. You can also include +// the schema in the URL. However, do not include the '/metrics/jobs/...' part. +// +// Note that all previously pushed metrics with the same job and instance will +// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT' +// to push to the Pushgateway.) +func Push(job, instance, url string) error { + return defRegistry.Push(job, instance, url, "PUT") +} + +// PushAdd works like Push, but only previously pushed metrics with the same +// name (and the same job and instance) will be replaced. (It uses HTTP method +// 'POST' to push to the Pushgateway.) +func PushAdd(job, instance, url string) error { + return defRegistry.Push(job, instance, url, "POST") +} + +// PushCollectors works like Push, but it does not collect from the default +// registry. Instead, it collects from the provided collectors. It is a +// convenient way to push only a few metrics. +func PushCollectors(job, instance, url string, collectors ...Collector) error { + return pushCollectors(job, instance, url, "PUT", collectors...) +} + +// PushAddCollectors works like PushAdd, but it does not collect from the +// default registry. Instead, it collects from the provided collectors. It is a +// convenient way to push only a few metrics. 
+func PushAddCollectors(job, instance, url string, collectors ...Collector) error { + return pushCollectors(job, instance, url, "POST", collectors...) +} + +func pushCollectors(job, instance, url, method string, collectors ...Collector) error { + r := newRegistry() + for _, collector := range collectors { + if _, err := r.Register(collector); err != nil { + return err + } + } + return r.Push(job, instance, url, method) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 8f2094cce5f..5970aaeeba9 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -11,245 +11,228 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + package prometheus import ( "bytes" + "compress/gzip" "errors" "fmt" + "hash/fnv" + "io" + "net/http" + "net/url" "os" "sort" + "strings" "sync" - "unicode/utf8" "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" dto "github.com/prometheus/client_model/go" ) +var ( + defRegistry = newDefaultRegistry() + errAlreadyReg = errors.New("duplicate metrics collector registration attempted") +) + +// Constants relevant to the HTTP interface. const ( + // APIVersion is the version of the format of the exported data. This + // will match this library's version, which subscribes to the Semantic + // Versioning scheme. + APIVersion = "0.0.4" + + // DelimitedTelemetryContentType is the content type set on telemetry + // data responses in delimited protobuf format. + DelimitedTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited` + // TextTelemetryContentType is the content type set on telemetry data + // responses in text format. + TextTelemetryContentType = `text/plain; version=` + APIVersion + // ProtoTextTelemetryContentType is the content type set on telemetry + // data responses in protobuf text format. (Only used for debugging.) + ProtoTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text` + // ProtoCompactTextTelemetryContentType is the content type set on + // telemetry data responses in protobuf compact text format. (Only used + // for debugging.) + ProtoCompactTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text` + + // Constants for object pools. + numBufs = 4 + numMetricFamilies = 1000 + numMetrics = 10000 + // Capacity for the channel to collect metrics and descriptors. capMetricChan = 1000 capDescChan = 10 -) -// DefaultRegisterer and DefaultGatherer are the implementations of the -// Registerer and Gatherer interface a number of convenience functions in this -// package act on. Initially, both variables point to the same Registry, which -// has a process collector (see NewProcessCollector) and a Go collector (see -// NewGoCollector) already registered. This approach to keep default instances -// as global state mirrors the approach of other packages in the Go standard -// library. Note that there are caveats. Change the variables with caution and -// only if you understand the consequences. 
Users who want to avoid global state -// altogether should not use the convenience function and act on custom -// instances instead. -var ( - defaultRegistry = NewRegistry() - DefaultRegisterer Registerer = defaultRegistry - DefaultGatherer Gatherer = defaultRegistry + contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" + contentEncodingHeader = "Content-Encoding" + + acceptEncodingHeader = "Accept-Encoding" + acceptHeader = "Accept" ) -func init() { - MustRegister(NewProcessCollector(os.Getpid(), "")) - MustRegister(NewGoCollector()) +// Handler returns the HTTP handler for the global Prometheus registry. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). Usually the handler is used to handle the "/metrics" endpoint. +func Handler() http.Handler { + return InstrumentHandler("prometheus", defRegistry) } -// NewRegistry creates a new vanilla Registry without any Collectors -// pre-registered. -func NewRegistry() *Registry { - return &Registry{ - collectorsByID: map[uint64]Collector{}, - descIDs: map[uint64]struct{}{}, - dimHashesByName: map[string]uint64{}, - } +// UninstrumentedHandler works in the same way as Handler, but the returned HTTP +// handler is not instrumented. This is useful if no instrumentation is desired +// (for whatever reason) or if the instrumentation has to happen with a +// different handler name (or with a different instrumentation approach +// altogether). See the InstrumentHandler example. +func UninstrumentedHandler() http.Handler { + return defRegistry } -// NewPedanticRegistry returns a registry that checks during collection if each -// collected Metric is consistent with its reported Desc, and if the Desc has -// actually been registered with the registry. +// Register registers a new Collector to be included in metrics collection. It +// returns an error if the descriptors provided by the Collector are invalid or +// if they - in combination with descriptors of already registered Collectors - +// do not fulfill the consistency and uniqueness criteria described in the Desc +// documentation. // -// Usually, a Registry will be happy as long as the union of all collected -// Metrics is consistent and valid even if some metrics are not consistent with -// their own Desc or a Desc provided by their registered Collector. Well-behaved -// Collectors and Metrics will only provide consistent Descs. This Registry is -// useful to test the implementation of Collectors and Metrics. -func NewPedanticRegistry() *Registry { - r := NewRegistry() - r.pedanticChecksEnabled = true - return r -} - -// Registerer is the interface for the part of a registry in charge of -// registering and unregistering. Users of custom registries should use -// Registerer as type for registration purposes (rather than the Registry type -// directly). In that way, they are free to use custom Registerer implementation -// (e.g. for testing purposes). -type Registerer interface { - // Register registers a new Collector to be included in metrics - // collection. It returns an error if the descriptors provided by the - // Collector are invalid or if they — in combination with descriptors of - // already registered Collectors — do not fulfill the consistency and - // uniqueness criteria described in the documentation of metric.Desc. 
- // - // If the provided Collector is equal to a Collector already registered - // (which includes the case of re-registering the same Collector), the - // returned error is an instance of AlreadyRegisteredError, which - // contains the previously registered Collector. - // - // It is in general not safe to register the same Collector multiple - // times concurrently. - Register(Collector) error - // MustRegister works like Register but registers any number of - // Collectors and panics upon the first registration that causes an - // error. - MustRegister(...Collector) - // Unregister unregisters the Collector that equals the Collector passed - // in as an argument. (Two Collectors are considered equal if their - // Describe method yields the same set of descriptors.) The function - // returns whether a Collector was unregistered. - // - // Note that even after unregistering, it will not be possible to - // register a new Collector that is inconsistent with the unregistered - // Collector, e.g. a Collector collecting metrics with the same name but - // a different help string. The rationale here is that the same registry - // instance must only collect consistent metrics throughout its - // lifetime. - Unregister(Collector) bool +// Do not register the same Collector multiple times concurrently. (Registering +// the same Collector twice would result in an error anyway, but on top of that, +// it is not safe to do so concurrently.) +func Register(m Collector) error { + _, err := defRegistry.Register(m) + return err } -// Gatherer is the interface for the part of a registry in charge of gathering -// the collected metrics into a number of MetricFamilies. The Gatherer interface -// comes with the same general implication as described for the Registerer -// interface. -type Gatherer interface { - // Gather calls the Collect method of the registered Collectors and then - // gathers the collected metrics into a lexicographically sorted slice - // of MetricFamily protobufs. Even if an error occurs, Gather attempts - // to gather as many metrics as possible. Hence, if a non-nil error is - // returned, the returned MetricFamily slice could be nil (in case of a - // fatal error that prevented any meaningful metric collection) or - // contain a number of MetricFamily protobufs, some of which might be - // incomplete, and some might be missing altogether. The returned error - // (which might be a MultiError) explains the details. In scenarios - // where complete collection is critical, the returned MetricFamily - // protobufs should be disregarded if the returned error is non-nil. - Gather() ([]*dto.MetricFamily, error) +// MustRegister works like Register but panics where Register would have +// returned an error. +func MustRegister(m Collector) { + err := Register(m) + if err != nil { + panic(err) + } } -// Register registers the provided Collector with the DefaultRegisterer. +// RegisterOrGet works like Register but does not return an error if a Collector +// is registered that equals a previously registered Collector. (Two Collectors +// are considered equal if their Describe method yields the same set of +// descriptors.) Instead, the previously registered Collector is returned (which +// is helpful if the new and previously registered Collectors are equal but not +// identical, i.e. not pointers to the same object). // -// Register is a shortcut for DefaultRegisterer.Register(c). See there for more -// details. 
-func Register(c Collector) error { - return DefaultRegisterer.Register(c) +// As for Register, it is still not safe to call RegisterOrGet with the same +// Collector multiple times concurrently. +func RegisterOrGet(m Collector) (Collector, error) { + return defRegistry.RegisterOrGet(m) } -// MustRegister registers the provided Collectors with the DefaultRegisterer and -// panics if any error occurs. -// -// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See -// there for more details. -func MustRegister(cs ...Collector) { - DefaultRegisterer.MustRegister(cs...) +// MustRegisterOrGet works like Register but panics where RegisterOrGet would +// have returned an error. +func MustRegisterOrGet(m Collector) Collector { + existing, err := RegisterOrGet(m) + if err != nil { + panic(err) + } + return existing } -// Unregister removes the registration of the provided Collector from the -// DefaultRegisterer. -// -// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for -// more details. +// Unregister unregisters the Collector that equals the Collector passed in as +// an argument. (Two Collectors are considered equal if their Describe method +// yields the same set of descriptors.) The function returns whether a Collector +// was unregistered. func Unregister(c Collector) bool { - return DefaultRegisterer.Unregister(c) -} - -// GathererFunc turns a function into a Gatherer. -type GathererFunc func() ([]*dto.MetricFamily, error) - -// Gather implements Gatherer. -func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { - return gf() + return defRegistry.Unregister(c) } -// AlreadyRegisteredError is returned by the Register method if the Collector to -// be registered has already been registered before, or a different Collector -// that collects the same metrics has been registered before. Registration fails -// in that case, but you can detect from the kind of error what has -// happened. The error contains fields for the existing Collector and the -// (rejected) new Collector that equals the existing one. This can be used to -// find out if an equal Collector has been registered before and switch over to -// using the old one, as demonstrated in the example. -type AlreadyRegisteredError struct { - ExistingCollector, NewCollector Collector -} - -func (err AlreadyRegisteredError) Error() string { - return "duplicate metrics collector registration attempted" +// SetMetricFamilyInjectionHook sets a function that is called whenever metrics +// are collected. The hook function must be set before metrics collection begins +// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The +// MetricFamily protobufs returned by the hook function are merged with the +// metrics collected in the usual way. +// +// This is a way to directly inject MetricFamily protobufs managed and owned by +// the caller. The caller has full responsibility. As no registration of the +// injected metrics has happened, there is no descriptor to check against, and +// there are no registration-time checks. If collect-time checks are disabled +// (see function EnableCollectChecks), no sanity checks are performed on the +// returned protobufs at all. If collect-checks are enabled, type and uniqueness +// checks are performed, but no further consistency checks (which would require +// knowledge of a metric descriptor). +// +// Sorting concerns: The caller is responsible for sorting the label pairs in +// each metric. 
However, the order of metrics will be sorted by the registry as +// it is required anyway after merging with the metric families collected +// conventionally. +// +// The function must be callable at any time and concurrently. +func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) { + defRegistry.metricFamilyInjectionHook = hook } -// MultiError is a slice of errors implementing the error interface. It is used -// by a Gatherer to report multiple errors during MetricFamily gathering. -type MultiError []error - -func (errs MultiError) Error() string { - if len(errs) == 0 { - return "" - } - buf := &bytes.Buffer{} - fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) - for _, err := range errs { - fmt.Fprintf(buf, "\n* %s", err) - } - return buf.String() +// PanicOnCollectError sets the behavior whether a panic is caused upon an error +// while metrics are collected and served to the HTTP endpoint. By default, an +// internal server error (status code 500) is served with an error message. +func PanicOnCollectError(b bool) { + defRegistry.panicOnCollectError = b } -// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only -// contained error as error if len(errs is 1). In all other cases, it returns -// the MultiError directly. This is helpful for returning a MultiError in a way -// that only uses the MultiError if needed. -func (errs MultiError) MaybeUnwrap() error { - switch len(errs) { - case 0: - return nil - case 1: - return errs[0] - default: - return errs - } +// EnableCollectChecks enables (or disables) additional consistency checks +// during metrics collection. These additional checks are not enabled by default +// because they inflict a performance penalty and the errors they check for can +// only happen if the used Metric and Collector types have internal programming +// errors. It can be helpful to enable these checks while working with custom +// Collectors or Metrics whose correctness is not well established yet. +func EnableCollectChecks(b bool) { + defRegistry.collectChecksEnabled = b } -// Registry registers Prometheus collectors, collects their metrics, and gathers -// them into MetricFamilies for exposition. It implements both Registerer and -// Gatherer. The zero value is not usable. Create instances with NewRegistry or -// NewPedanticRegistry. -type Registry struct { - mtx sync.RWMutex - collectorsByID map[uint64]Collector // ID is a hash of the descIDs. - descIDs map[uint64]struct{} - dimHashesByName map[string]uint64 - pedanticChecksEnabled bool +// encoder is a function that writes a dto.MetricFamily to an io.Writer in a +// certain encoding. It returns the number of bytes written and any error +// encountered. Note that pbutil.WriteDelimited and pbutil.MetricFamilyToText +// are encoders. +type encoder func(io.Writer, *dto.MetricFamily) (int, error) + +type registry struct { + mtx sync.RWMutex + collectorsByID map[uint64]Collector // ID is a hash of the descIDs. + descIDs map[uint64]struct{} + dimHashesByName map[string]uint64 + bufPool chan *bytes.Buffer + metricFamilyPool chan *dto.MetricFamily + metricPool chan *dto.Metric + metricFamilyInjectionHook func() []*dto.MetricFamily + + panicOnCollectError, collectChecksEnabled bool } -// Register implements Registerer. -func (r *Registry) Register(c Collector) error { - var ( - descChan = make(chan *Desc, capDescChan) - newDescIDs = map[uint64]struct{}{} - newDimHashesByName = map[string]uint64{} - collectorID uint64 // Just a sum of all desc IDs. 
- duplicateDescErr error - ) +func (r *registry) Register(c Collector) (Collector, error) { + descChan := make(chan *Desc, capDescChan) go func() { c.Describe(descChan) close(descChan) }() + + newDescIDs := map[uint64]struct{}{} + newDimHashesByName := map[string]uint64{} + var collectorID uint64 // Just a sum of all desc IDs. + var duplicateDescErr error + r.mtx.Lock() defer r.mtx.Unlock() - // Conduct various tests... + // Coduct various tests... for desc := range descChan { // Is the descriptor valid at all? if desc.err != nil { - return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) + return c, fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) } // Is the descID unique? @@ -270,13 +253,13 @@ func (r *Registry) Register(c Collector) error { // First check existing descriptors... if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { if dimHash != desc.dimHash { - return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) + return nil, fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) } } else { // ...then check the new descriptors already seen. if dimHash, exists := newDimHashesByName[desc.fqName]; exists { if dimHash != desc.dimHash { - return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) + return nil, fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) } } else { newDimHashesByName[desc.fqName] = desc.dimHash @@ -285,18 +268,15 @@ func (r *Registry) Register(c Collector) error { } // Did anything happen at all? if len(newDescIDs) == 0 { - return errors.New("collector has no descriptors") + return nil, errors.New("collector has no descriptors") } if existing, exists := r.collectorsByID[collectorID]; exists { - return AlreadyRegisteredError{ - ExistingCollector: existing, - NewCollector: c, - } + return existing, errAlreadyReg } // If the collectorID is new, but at least one of the descs existed // before, we are in trouble. if duplicateDescErr != nil { - return duplicateDescErr + return nil, duplicateDescErr } // Only after all tests have passed, actually register. @@ -307,20 +287,26 @@ func (r *Registry) Register(c Collector) error { for name, dimHash := range newDimHashesByName { r.dimHashesByName[name] = dimHash } - return nil + return c, nil +} + +func (r *registry) RegisterOrGet(m Collector) (Collector, error) { + existing, err := r.Register(m) + if err != nil && err != errAlreadyReg { + return nil, err + } + return existing, nil } -// Unregister implements Registerer. -func (r *Registry) Unregister(c Collector) bool { - var ( - descChan = make(chan *Desc, capDescChan) - descIDs = map[uint64]struct{}{} - collectorID uint64 // Just a sum of the desc IDs. - ) +func (r *registry) Unregister(c Collector) bool { + descChan := make(chan *Desc, capDescChan) go func() { c.Describe(descChan) close(descChan) }() + + descIDs := map[uint64]struct{}{} + var collectorID uint64 // Just a sum of the desc IDs. for desc := range descChan { if _, exists := descIDs[desc.id]; !exists { collectorID += desc.id @@ -347,25 +333,69 @@ func (r *Registry) Unregister(c Collector) bool { return true } -// MustRegister implements Registerer. 
-func (r *Registry) MustRegister(cs ...Collector) { - for _, c := range cs { - if err := r.Register(c); err != nil { +func (r *registry) Push(job, instance, pushURL, method string) error { + if !strings.Contains(pushURL, "://") { + pushURL = "http://" + pushURL + } + pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job)) + if instance != "" { + pushURL += "/instances/" + url.QueryEscape(instance) + } + buf := r.getBuf() + defer r.giveBuf(buf) + if err := r.writePB(expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)); err != nil { + if r.panicOnCollectError { + panic(err) + } + return err + } + req, err := http.NewRequest(method, pushURL, buf) + if err != nil { + return err + } + req.Header.Set(contentTypeHeader, DelimitedTelemetryContentType) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != 202 { + return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, pushURL) + } + return nil +} + +func (r *registry) ServeHTTP(w http.ResponseWriter, req *http.Request) { + contentType := expfmt.Negotiate(req.Header) + buf := r.getBuf() + defer r.giveBuf(buf) + writer, encoding := decorateWriter(req, buf) + if err := r.writePB(expfmt.NewEncoder(writer, contentType)); err != nil { + if r.panicOnCollectError { panic(err) } + http.Error(w, "An error has occurred:\n\n"+err.Error(), http.StatusInternalServerError) + return } + if closer, ok := writer.(io.Closer); ok { + closer.Close() + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + w.Write(buf.Bytes()) } -// Gather implements Gatherer. -func (r *Registry) Gather() ([]*dto.MetricFamily, error) { - var ( - metricChan = make(chan Metric, capMetricChan) - metricHashes = map[uint64]struct{}{} - dimHashes = map[string]uint64{} - wg sync.WaitGroup - errs MultiError // The collected errors to return in the end. - registeredDescIDs map[uint64]struct{} // Only used for pedantic checks - ) +func (r *registry) writePB(encoder expfmt.Encoder) error { + var metricHashes map[uint64]struct{} + if r.collectChecksEnabled { + metricHashes = make(map[uint64]struct{}) + } + metricChan := make(chan Metric, capMetricChan) + wg := sync.WaitGroup{} r.mtx.RLock() metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) @@ -383,21 +413,11 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { collector.Collect(metricChan) }(collector) } - - // In case pedantic checks are enabled, we have to copy the map before - // giving up the RLock. - if r.pedanticChecksEnabled { - registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) - for id := range r.descIDs { - registeredDescIDs[id] = struct{}{} - } - } - r.mtx.RUnlock() // Drain metricChan in case of premature return. defer func() { - for range metricChan { + for _ = range metricChan { } }() @@ -407,267 +427,94 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { // of metricFamiliesByName (and of metricHashes if checks are // enabled). Most likely not worth it. 
desc := metric.Desc() - dtoMetric := &dto.Metric{} - if err := metric.Write(dtoMetric); err != nil { - errs = append(errs, fmt.Errorf( - "error collecting metric %v: %s", desc, err, - )) - continue - } metricFamily, ok := metricFamiliesByName[desc.fqName] - if ok { - if metricFamily.GetHelp() != desc.help { - errs = append(errs, fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), - )) - continue - } - // TODO(beorn7): Simplify switch once Desc has type. - switch metricFamily.GetType() { - case dto.MetricType_COUNTER: - if dtoMetric.Counter == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Counter", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_GAUGE: - if dtoMetric.Gauge == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Gauge", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_SUMMARY: - if dtoMetric.Summary == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Summary", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_UNTYPED: - if dtoMetric.Untyped == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be Untyped", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_HISTOGRAM: - if dtoMetric.Histogram == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Histogram", - desc.fqName, dtoMetric, - )) - continue - } - default: - panic("encountered MetricFamily with invalid type") - } - } else { - metricFamily = &dto.MetricFamily{} + if !ok { + metricFamily = r.getMetricFamily() + defer r.giveMetricFamily(metricFamily) metricFamily.Name = proto.String(desc.fqName) metricFamily.Help = proto.String(desc.help) - // TODO(beorn7): Simplify switch once Desc has type. - switch { - case dtoMetric.Gauge != nil: - metricFamily.Type = dto.MetricType_GAUGE.Enum() - case dtoMetric.Counter != nil: - metricFamily.Type = dto.MetricType_COUNTER.Enum() - case dtoMetric.Summary != nil: - metricFamily.Type = dto.MetricType_SUMMARY.Enum() - case dtoMetric.Untyped != nil: - metricFamily.Type = dto.MetricType_UNTYPED.Enum() - case dtoMetric.Histogram != nil: - metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() - default: - errs = append(errs, fmt.Errorf( - "empty metric collected: %s", dtoMetric, - )) - continue - } metricFamiliesByName[desc.fqName] = metricFamily } - if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { - errs = append(errs, err) - continue + dtoMetric := r.getMetric() + defer r.giveMetric(dtoMetric) + if err := metric.Write(dtoMetric); err != nil { + // TODO: Consider different means of error reporting so + // that a single erroneous metric could be skipped + // instead of blowing up the whole collection. + return fmt.Errorf("error collecting metric %v: %s", desc, err) } - if r.pedanticChecksEnabled { - // Is the desc registered at all? - if _, exist := registeredDescIDs[desc.id]; !exist { - errs = append(errs, fmt.Errorf( - "collected metric %s %s with unregistered descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - )) - continue - } - if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { - errs = append(errs, err) - continue + switch { + case metricFamily.Type != nil: + // Type already set. We are good. 
+ case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + return fmt.Errorf("empty metric collected: %s", dtoMetric) + } + if r.collectChecksEnabled { + if err := r.checkConsistency(metricFamily, dtoMetric, desc, metricHashes); err != nil { + return err } } metricFamily.Metric = append(metricFamily.Metric, dtoMetric) } - return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} -// Gatherers is a slice of Gatherer instances that implements the Gatherer -// interface itself. Its Gather method calls Gather on all Gatherers in the -// slice in order and returns the merged results. Errors returned from the -// Gather calles are all returned in a flattened MultiError. Duplicate and -// inconsistent Metrics are skipped (first occurrence in slice order wins) and -// reported in the returned error. -// -// Gatherers can be used to merge the Gather results from multiple -// Registries. It also provides a way to directly inject existing MetricFamily -// protobufs into the gathering by creating a custom Gatherer with a Gather -// method that simply returns the existing MetricFamily protobufs. Note that no -// registration is involved (in contrast to Collector registration), so -// obviously registration-time checks cannot happen. Any inconsistencies between -// the gathered MetricFamilies are reported as errors by the Gather method, and -// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies -// (e.g. syntactically invalid metric or label names) will go undetected. -type Gatherers []Gatherer - -// Gather implements Gatherer. -func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { - var ( - metricFamiliesByName = map[string]*dto.MetricFamily{} - metricHashes = map[uint64]struct{}{} - dimHashes = map[string]uint64{} - errs MultiError // The collected errors to return in the end. 
- ) - - for i, g := range gs { - mfs, err := g.Gather() - if err != nil { - if multiErr, ok := err.(MultiError); ok { - for _, err := range multiErr { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) - } - } else { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) - } - } - for _, mf := range mfs { + if r.metricFamilyInjectionHook != nil { + for _, mf := range r.metricFamilyInjectionHook() { existingMF, exists := metricFamiliesByName[mf.GetName()] - if exists { - if existingMF.GetHelp() != mf.GetHelp() { - errs = append(errs, fmt.Errorf( - "gathered metric family %s has help %q but should have %q", - mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), - )) - continue + if !exists { + metricFamiliesByName[mf.GetName()] = mf + if r.collectChecksEnabled { + for _, m := range mf.Metric { + if err := r.checkConsistency(mf, m, nil, metricHashes); err != nil { + return err + } + } } - if existingMF.GetType() != mf.GetType() { - errs = append(errs, fmt.Errorf( - "gathered metric family %s has type %s but should have %s", - mf.GetName(), mf.GetType(), existingMF.GetType(), - )) - continue - } - } else { - existingMF = &dto.MetricFamily{} - existingMF.Name = mf.Name - existingMF.Help = mf.Help - existingMF.Type = mf.Type - metricFamiliesByName[mf.GetName()] = existingMF + continue } for _, m := range mf.Metric { - if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil { - errs = append(errs, err) - continue + if r.collectChecksEnabled { + if err := r.checkConsistency(existingMF, m, nil, metricHashes); err != nil { + return err + } } existingMF.Metric = append(existingMF.Metric, m) } } } - return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} - -// metricSorter is a sortable slice of *dto.Metric. -type metricSorter []*dto.Metric - -func (s metricSorter) Len() int { - return len(s) -} - -func (s metricSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s metricSorter) Less(i, j int) bool { - if len(s[i].Label) != len(s[j].Label) { - // This should not happen. The metrics are - // inconsistent. However, we have to deal with the fact, as - // people might use custom collectors or metric family injection - // to create inconsistent metrics. So let's simply compare the - // number of labels in this case. That will still yield - // reproducible sorting. - return len(s[i].Label) < len(s[j].Label) - } - for n, lp := range s[i].Label { - vi := lp.GetValue() - vj := s[j].Label[n].GetValue() - if vi != vj { - return vi < vj - } - } - - // We should never arrive here. Multiple metrics with the same - // label set in the same scrape will lead to undefined ingestion - // behavior. However, as above, we have to provide stable sorting - // here, even for inconsistent metrics. So sort equal metrics - // by their timestamp, with missing timestamps (implying "now") - // coming last. - if s[i].TimestampMs == nil { - return false - } - if s[j].TimestampMs == nil { - return true - } - return s[i].GetTimestampMs() < s[j].GetTimestampMs() -} -// normalizeMetricFamilies returns a MetricFamily slice with empty -// MetricFamilies pruned and the remaining MetricFamilies sorted by name within -// the slice, with the contained Metrics sorted within each MetricFamily. -func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { + // Now that MetricFamilies are all set, sort their Metrics + // lexicographically by their label values. 
for _, mf := range metricFamiliesByName { sort.Sort(metricSorter(mf.Metric)) } + + // Write out MetricFamilies sorted by their name. names := make([]string, 0, len(metricFamiliesByName)) - for name, mf := range metricFamiliesByName { - if len(mf.Metric) > 0 { - names = append(names, name) - } + for name := range metricFamiliesByName { + names = append(names, name) } sort.Strings(names) - result := make([]*dto.MetricFamily, 0, len(names)) + for _, name := range names { - result = append(result, metricFamiliesByName[name]) + if err := encoder.Encode(metricFamiliesByName[name]); err != nil { + return err + } } - return result + return nil } -// checkMetricConsistency checks if the provided Metric is consistent with the -// provided MetricFamily. It also hashed the Metric labels and the MetricFamily -// name. If the resulting hash is alread in the provided metricHashes, an error -// is returned. If not, it is added to metricHashes. The provided dimHashes maps -// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes -// doesn't yet contain a hash for the provided MetricFamily, it is -// added. Otherwise, an error is returned if the existing dimHashes in not equal -// the calculated dimHash. -func checkMetricConsistency( - metricFamily *dto.MetricFamily, - dtoMetric *dto.Metric, - metricHashes map[uint64]struct{}, - dimHashes map[string]uint64, -) error { +func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, desc *Desc, metricHashes map[uint64]struct{}) error { + // Type consistency with metric family. if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || @@ -680,52 +527,43 @@ func checkMetricConsistency( ) } - for _, labelPair := range dtoMetric.GetLabel() { - if !utf8.ValidString(*labelPair.Value) { - return fmt.Errorf("collected metric's label %s is not utf8: %#v", *labelPair.Name, *labelPair.Value) - } - } - // Is the metric unique (i.e. no other metric with the same name and the same label values)? - h := hashNew() - h = hashAdd(h, metricFamily.GetName()) - h = hashAddByte(h, separatorByte) - dh := hashNew() + h := fnv.New64a() + var buf bytes.Buffer + buf.WriteString(metricFamily.GetName()) + buf.WriteByte(separatorByte) + h.Write(buf.Bytes()) // Make sure label pairs are sorted. We depend on it for the consistency - // check. + // check. Label pairs must be sorted by contract. But the point of this + // method is to check for contract violations. So we better do the sort + // now. 
sort.Sort(LabelPairSorter(dtoMetric.Label)) for _, lp := range dtoMetric.Label { - h = hashAdd(h, lp.GetValue()) - h = hashAddByte(h, separatorByte) - dh = hashAdd(dh, lp.GetName()) - dh = hashAddByte(dh, separatorByte) + buf.Reset() + buf.WriteString(lp.GetValue()) + buf.WriteByte(separatorByte) + h.Write(buf.Bytes()) } - if _, exists := metricHashes[h]; exists { + metricHash := h.Sum64() + if _, exists := metricHashes[metricHash]; exists { return fmt.Errorf( "collected metric %s %s was collected before with the same name and label values", metricFamily.GetName(), dtoMetric, ) } - if dimHash, ok := dimHashes[metricFamily.GetName()]; ok { - if dimHash != dh { - return fmt.Errorf( - "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family", - metricFamily.GetName(), dtoMetric, - ) - } - } else { - dimHashes[metricFamily.GetName()] = dh + metricHashes[metricHash] = struct{}{} + + if desc == nil { + return nil // Nothing left to check if we have no desc. } - metricHashes[h] = struct{}{} - return nil -} -func checkDescConsistency( - metricFamily *dto.MetricFamily, - dtoMetric *dto.Metric, - desc *Desc, -) error { - // Desc help consistency with metric family help. + // Desc consistency with metric family. + if metricFamily.GetName() != desc.fqName { + return fmt.Errorf( + "collected metric %s %s has name %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetName(), desc.fqName, + ) + } if metricFamily.GetHelp() != desc.help { return fmt.Errorf( "collected metric %s %s has help %q but should have %q", @@ -758,5 +596,131 @@ func checkDescConsistency( ) } } + + r.mtx.RLock() // Remaining checks need the read lock. + defer r.mtx.RUnlock() + + // Is the desc registered? + if _, exist := r.descIDs[desc.id]; !exist { + return fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + return nil } + +func (r *registry) getBuf() *bytes.Buffer { + select { + case buf := <-r.bufPool: + return buf + default: + return &bytes.Buffer{} + } +} + +func (r *registry) giveBuf(buf *bytes.Buffer) { + buf.Reset() + select { + case r.bufPool <- buf: + default: + } +} + +func (r *registry) getMetricFamily() *dto.MetricFamily { + select { + case mf := <-r.metricFamilyPool: + return mf + default: + return &dto.MetricFamily{} + } +} + +func (r *registry) giveMetricFamily(mf *dto.MetricFamily) { + mf.Reset() + select { + case r.metricFamilyPool <- mf: + default: + } +} + +func (r *registry) getMetric() *dto.Metric { + select { + case m := <-r.metricPool: + return m + default: + return &dto.Metric{} + } +} + +func (r *registry) giveMetric(m *dto.Metric) { + m.Reset() + select { + case r.metricPool <- m: + default: + } +} + +func newRegistry() *registry { + return ®istry{ + collectorsByID: map[uint64]Collector{}, + descIDs: map[uint64]struct{}{}, + dimHashesByName: map[string]uint64{}, + bufPool: make(chan *bytes.Buffer, numBufs), + metricFamilyPool: make(chan *dto.MetricFamily, numMetricFamilies), + metricPool: make(chan *dto.Metric, numMetrics), + } +} + +func newDefaultRegistry() *registry { + r := newRegistry() + r.Register(NewProcessCollector(os.Getpid(), "")) + r.Register(NewGoCollector()) + return r +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). 
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part := strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} + +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. However, we have to deal with the fact, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting. + return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + return true +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go b/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go index d136bba1e78..f30c90c06be 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go @@ -17,30 +17,41 @@ // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file. -package prometheus_test +package prometheus import ( "bytes" + "encoding/binary" "net/http" - "net/http/httptest" "testing" + "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" +) - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/expfmt" +type fakeResponseWriter struct { + header http.Header + body bytes.Buffer +} - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" -) +func (r *fakeResponseWriter) Header() http.Header { + return r.header +} + +func (r *fakeResponseWriter) Write(d []byte) (l int, err error) { + return r.body.Write(d) +} + +func (r *fakeResponseWriter) WriteHeader(c int) { +} func testHandler(t testing.TB) { - metricVec := prometheus.NewCounterVec( - prometheus.CounterOpts{ + metricVec := NewCounterVec( + CounterOpts{ Name: "name", Help: "docstring", - ConstLabels: prometheus.Labels{"constname": "constvalue"}, + ConstLabels: Labels{"constname": "constvalue"}, }, []string{"labelname"}, ) @@ -48,6 +59,8 @@ func testHandler(t testing.TB) { metricVec.WithLabelValues("val1").Inc() metricVec.WithLabelValues("val2").Inc() + varintBuf := make([]byte, binary.MaxVarintLen32) + externalMetricFamily := &dto.MetricFamily{ Name: proto.String("externalname"), Help: proto.String("externaldocstring"), @@ -70,9 +83,18 @@ func testHandler(t testing.TB) { }, }, } - externalBuf := &bytes.Buffer{} - enc := expfmt.NewEncoder(externalBuf, expfmt.FmtProtoDelim) - if err := enc.Encode(externalMetricFamily); err != nil { + marshaledExternalMetricFamily, err := proto.Marshal(externalMetricFamily) + if err != nil { + t.Fatal(err) + } + var externalBuf bytes.Buffer + l := binary.PutUvarint(varintBuf, uint64(len(marshaledExternalMetricFamily))) + _, err = externalBuf.Write(varintBuf[:l]) + if err != nil { + t.Fatal(err) + } + _, err = externalBuf.Write(marshaledExternalMetricFamily) + if err != nil { t.Fatal(err) } 
externalMetricFamilyAsBytes := externalBuf.Bytes() @@ -138,9 +160,18 @@ metric: < }, }, } - buf := &bytes.Buffer{} - enc = expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) - if err := enc.Encode(expectedMetricFamily); err != nil { + marshaledExpectedMetricFamily, err := proto.Marshal(expectedMetricFamily) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + l = binary.PutUvarint(varintBuf, uint64(len(marshaledExpectedMetricFamily))) + _, err = buf.Write(varintBuf[:l]) + if err != nil { + t.Fatal(err) + } + _, err = buf.Write(marshaledExpectedMetricFamily) + if err != nil { t.Fatal(err) } expectedMetricFamilyAsBytes := buf.Bytes() @@ -185,7 +216,7 @@ metric: < externalMetricFamilyWithSameName := &dto.MetricFamily{ Name: proto.String("name"), - Help: proto.String("docstring"), + Help: proto.String("inconsistent help string does not matter here"), Type: dto.MetricType_COUNTER.Enum(), Metric: []*dto.Metric{ { @@ -209,34 +240,6 @@ metric: < expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric: label: counter: > metric: label: counter: > metric: label: counter: > `) - externalMetricFamilyWithInvalidLabelValue := &dto.MetricFamily{ - Name: proto.String("name"), - Help: proto.String("docstring"), - Type: dto.MetricType_COUNTER.Enum(), - Metric: []*dto.Metric{ - { - Label: []*dto.LabelPair{ - { - Name: proto.String("constname"), - Value: proto.String("\xFF"), - }, - { - Name: proto.String("labelname"), - Value: proto.String("different_val"), - }, - }, - Counter: &dto.Counter{ - Value: proto.Float64(42), - }, - }, - }, - } - - expectedMetricFamilyInvalidLabelValueAsText := []byte(`An error has occurred during metrics gathering: - -collected metric's label constname is not utf8: "\xff" -`) - type output struct { headers map[string]string body []byte @@ -245,7 +248,7 @@ collected metric's label constname is not utf8: "\xff" var scenarios = []struct { headers map[string]string out output - collector prometheus.Collector + collector Collector externalMF []*dto.MetricFamily }{ { // 0 @@ -480,40 +483,23 @@ collected metric's label constname is not utf8: "\xff" externalMetricFamilyWithSameName, }, }, - { // 16 - headers: map[string]string{ - "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", - }, - out: output{ - headers: map[string]string{ - "Content-Type": `text/plain; charset=utf-8`, - }, - body: expectedMetricFamilyInvalidLabelValueAsText, - }, - collector: metricVec, - externalMF: []*dto.MetricFamily{ - externalMetricFamily, - externalMetricFamilyWithInvalidLabelValue, - }, - }, } for i, scenario := range scenarios { - registry := prometheus.NewPedanticRegistry() - gatherer := prometheus.Gatherer(registry) - if scenario.externalMF != nil { - gatherer = prometheus.Gatherers{ - registry, - prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { - return scenario.externalMF, nil - }), - } - } + registry := newRegistry() + registry.collectChecksEnabled = true if scenario.collector != nil { registry.Register(scenario.collector) } - writer := httptest.NewRecorder() - handler := prometheus.InstrumentHandler("prometheus", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{})) + if scenario.externalMF != nil { + registry.metricFamilyInjectionHook = func() []*dto.MetricFamily { + return scenario.externalMF + } + } + writer := &fakeResponseWriter{ + header: http.Header{}, + } + handler := InstrumentHandler("prometheus", registry) request, _ := http.NewRequest("GET", "/", nil) for 
key, value := range scenario.headers { request.Header.Add(key, value) @@ -521,7 +507,7 @@ collected metric's label constname is not utf8: "\xff" handler(writer, request) for key, value := range scenario.out.headers { - if writer.HeaderMap.Get(key) != value { + if writer.Header().Get(key) != value { t.Errorf( "%d. expected %q for header %q, got %q", i, value, key, writer.Header().Get(key), @@ -529,10 +515,10 @@ collected metric's label constname is not utf8: "\xff" } } - if !bytes.Equal(scenario.out.body, writer.Body.Bytes()) { + if !bytes.Equal(scenario.out.body, writer.body.Bytes()) { t.Errorf( - "%d. expected body:\n%s\ngot body:\n%s\n", - i, scenario.out.body, writer.Body.Bytes(), + "%d. expected %q for body, got %q", + i, scenario.out.body, writer.body.Bytes(), ) } } @@ -547,44 +533,3 @@ func BenchmarkHandler(b *testing.B) { testHandler(b) } } - -func TestRegisterWithOrGet(t *testing.T) { - // Replace the default registerer just to be sure. This is bad, but this - // whole test will go away once RegisterOrGet is removed. - oldRegisterer := prometheus.DefaultRegisterer - defer func() { - prometheus.DefaultRegisterer = oldRegisterer - }() - prometheus.DefaultRegisterer = prometheus.NewRegistry() - original := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "test", - Help: "help", - }, - []string{"foo", "bar"}, - ) - equalButNotSame := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "test", - Help: "help", - }, - []string{"foo", "bar"}, - ) - var err error - if err = prometheus.Register(original); err != nil { - t.Fatal(err) - } - if err = prometheus.Register(equalButNotSame); err == nil { - t.Fatal("expected error when registringe equal collector") - } - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { - if are.ExistingCollector != original { - t.Error("expected original collector but got something else") - } - if are.ExistingCollector == equalButNotSame { - t.Error("expected original callector but got new one") - } - } else { - t.Error("unexpected error:", err) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 21c031e4b7d..fe81e004f67 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -15,6 +15,7 @@ package prometheus import ( "fmt" + "hash/fnv" "math" "sort" "sync" @@ -36,10 +37,7 @@ const quantileLabel = "quantile" // // A typical use-case is the observation of request latencies. By default, a // Summary provides the median, the 90th and the 99th percentile of the latency -// as rank estimations. However, the default behavior will change in the -// upcoming v0.10 of the library. There will be no rank estiamtions at all by -// default. For a sane transition, it is recommended to set the desired rank -// estimations explicitly. +// as rank estimations. // // Note that the rank estimations cannot be aggregated in a meaningful way with // the Prometheus query language (i.e. you cannot average or add them). If you @@ -56,11 +54,8 @@ type Summary interface { Observe(float64) } -// DefObjectives are the default Summary quantile values. -// -// Deprecated: DefObjectives will not be used as the default objectives in -// v0.10 of the library. The default Summary will have no quantiles then. var ( + // DefObjectives are the default Summary quantile values. 
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} errQuantileLabelNotAllowed = fmt.Errorf( @@ -81,10 +76,8 @@ const ( ) // SummaryOpts bundles the options for creating a Summary metric. It is -// mandatory to set Name and Help to a non-empty string. While all other fields -// are optional and can safely be left at their zero value, it is recommended to -// explicitly set the Objectives field to the desired value as the default value -// will change in the upcoming v0.10 of the library. +// mandatory to set Name and Help to a non-empty string. All other fields are +// optional and can safely be left at their zero value. type SummaryOpts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Summary (created by joining these components with @@ -121,15 +114,9 @@ type SummaryOpts struct { ConstLabels Labels // Objectives defines the quantile rank estimates with their respective - // absolute error. If Objectives[q] = e, then the value reported for q - // will be the φ-quantile value for some φ between q-e and q+e. The - // default value is DefObjectives. It is used if Objectives is left at - // its zero value (i.e. nil). To create a Summary without Objectives, - // set it to an empty map (i.e. map[float64]float64{}). - // - // Deprecated: Note that the current value of DefObjectives is - // deprecated. It will be replaced by an empty map in v0.10 of the - // library. Please explicitly set Objectives to the desired value. + // absolute error. If Objectives[q] = e, then the value reported + // for q will be the φ-quantile value for some φ between q-e and q+e. + // The default value is DefObjectives. Objectives map[float64]float64 // MaxAge defines the duration for which an observation stays relevant @@ -153,11 +140,11 @@ type SummaryOpts struct { BufCap uint32 } -// Great fuck-up with the sliding-window decay algorithm... The Merge method of -// perk/quantile is actually not working as advertised - and it might be -// unfixable, as the underlying algorithm is apparently not capable of merging -// summaries in the first place. To avoid using Merge, we are currently adding -// observations to _each_ age bucket, i.e. the effort to add a sample is +// TODO: Great fuck-up with the sliding-window decay algorithm... The Merge +// method of perk/quantile is actually not working as advertised - and it might +// be unfixable, as the underlying algorithm is apparently not capable of +// merging summaries in the first place. To avoid using Merge, we are currently +// adding observations to _each_ age bucket, i.e. the effort to add a sample is // essentially multiplied by the number of age buckets. When rotating age // buckets, we empty the previous head stream. On scrape time, we simply take // the quantiles from the head stream (no merging required). Result: More effort @@ -197,7 +184,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { } } - if opts.Objectives == nil { + if len(opts.Objectives) == 0 { opts.Objectives = DefObjectives } @@ -241,12 +228,12 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { } sort.Float64s(s.sortedObjectives) - s.init(s) // Init self-collection. + s.Init(s) // Init self-collection. return s } type summary struct { - selfCollector + SelfCollector bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. mtx sync.Mutex // Protects every other moving part. @@ -404,11 +391,12 @@ func (s quantSort) Less(i, j int) bool { // (e.g. 
HTTP request latencies, partitioned by status code and method). Create // instances with NewSummaryVec. type SummaryVec struct { - *metricVec + MetricVec } // NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and -// partitioned by the given label names. +// partitioned by the given label names. At least one label name must be +// provided. func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -417,60 +405,35 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { opts.ConstLabels, ) return &SummaryVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newSummary(desc, opts, lvs...) - }), + MetricVec: MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + hash: fnv.New64a(), + newMetric: func(lvs ...string) Metric { + return newSummary(desc, opts, lvs...) + }, + }, } } -// GetMetricWithLabelValues returns the Summary for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Summary is created. -// -// It is possible to call this method without using the returned Summary to only -// create the new Summary but leave it at its starting value, a Summary without -// any observations. -// -// Keeping the Summary for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Summary from the SummaryVec. In that case, the -// Summary will still exist, but it will not be exported anymore, even if a -// Summary with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - metric, err := m.metricVec.getMetricWithLabelValues(lvs...) +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Summary and not a +// Metric so that no type conversion is required. +func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { - return metric.(Observer), err + return metric.(Summary), err } return nil, err } -// GetMetricWith returns the Summary for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Summary is created. Implications of -// creating a Summary without using it and keeping the Summary for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc. -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. 
-func (m *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { - metric, err := m.metricVec.getMetricWith(labels) +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Summary and not a Metric so that no +// type conversion is required. +func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { + metric, err := m.MetricVec.GetMetricWith(labels) if metric != nil { - return metric.(Observer), err + return metric.(Summary), err } return nil, err } @@ -479,15 +442,15 @@ func (m *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { // GetMetricWithLabelValues would have returned an error. By not returning an // error, WithLabelValues allows shortcuts like // myVec.WithLabelValues("404", "GET").Observe(42.21) -func (m *SummaryVec) WithLabelValues(lvs ...string) Observer { - return m.metricVec.withLabelValues(lvs...).(Observer) +func (m *SummaryVec) WithLabelValues(lvs ...string) Summary { + return m.MetricVec.WithLabelValues(lvs...).(Summary) } // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. By not returning an error, With allows shortcuts like // myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (m *SummaryVec) With(labels Labels) Observer { - return m.metricVec.with(labels).(Observer) +func (m *SummaryVec) With(labels Labels) Summary { + return m.MetricVec.With(labels).(Summary) } type constSummary struct { @@ -548,8 +511,8 @@ func NewConstSummary( quantiles map[float64]float64, labelValues ...string, ) (Metric, error) { - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality } return &constSummary{ desc: desc, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go b/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go index b162ed94686..0790cdfe727 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go @@ -25,45 +25,6 @@ import ( dto "github.com/prometheus/client_model/go" ) -func TestSummaryWithDefaultObjectives(t *testing.T) { - reg := NewRegistry() - summaryWithDefaultObjectives := NewSummary(SummaryOpts{ - Name: "default_objectives", - Help: "Test help.", - }) - if err := reg.Register(summaryWithDefaultObjectives); err != nil { - t.Error(err) - } - - m := &dto.Metric{} - if err := summaryWithDefaultObjectives.Write(m); err != nil { - t.Error(err) - } - if len(m.GetSummary().Quantile) != len(DefObjectives) { - t.Error("expected default objectives in summary") - } -} - -func TestSummaryWithoutObjectives(t *testing.T) { - reg := NewRegistry() - summaryWithEmptyObjectives := NewSummary(SummaryOpts{ - Name: "empty_objectives", - Help: "Test help.", - Objectives: map[float64]float64{}, - }) - if err := reg.Register(summaryWithEmptyObjectives); err != nil { - t.Error(err) - } - - m := &dto.Metric{} - if err := summaryWithEmptyObjectives.Write(m); err != nil { - t.Error(err) - } - if len(m.GetSummary().Quantile) != 0 { - t.Error("expected no objectives in summary") - } -} - func benchmarkSummaryObserve(w int, b *testing.B) { b.StopTimer() @@ -175,9 +136,8 @@ func TestSummaryConcurrency(t *testing.T) { end.Add(concLevel) sum := NewSummary(SummaryOpts{ - Name: "test_summary", - Help: "helpless", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 
0.99: 0.001}, + Name: "test_summary", + Help: "helpless", }) allVars := make([]float64, total) @@ -263,9 +223,8 @@ func TestSummaryVecConcurrency(t *testing.T) { sum := NewSummaryVec( SummaryOpts{ - Name: "test_summary", - Help: "helpless", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + Name: "test_summary", + Help: "helpless", }, []string{"label"}, ) @@ -301,7 +260,7 @@ func TestSummaryVecConcurrency(t *testing.T) { for i := 0; i < vecLength; i++ { m := &dto.Metric{} s := sum.WithLabelValues(string('A' + i)) - s.(Summary).Write(m) + s.Write(m) if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want { t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want) } @@ -346,7 +305,7 @@ func TestSummaryDecay(t *testing.T) { m := &dto.Metric{} i := 0 tick := time.NewTicker(time.Millisecond) - for range tick.C { + for _ = range tick.C { i++ sum.Observe(float64(i)) if i%10 == 0 { @@ -370,8 +329,8 @@ func TestSummaryDecay(t *testing.T) { } func getBounds(vars []float64, q, ε float64) (min, max float64) { - // TODO(beorn7): This currently tolerates an error of up to 2*ε. The - // error must be at most ε, but for some reason, it's sometimes slightly + // TODO: This currently tolerates an error of up to 2*ε. The error must + // be at most ε, but for some reason, it's sometimes slightly // higher. That's a bug. n := float64(len(vars)) lower := int((q - 2*ε) * n) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go deleted file mode 100644 index b8fc5f18c85..00000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import "time" - -// Timer is a helper type to time functions. Use NewTimer to create new -// instances. -type Timer struct { - begin time.Time - observer Observer -} - -// NewTimer creates a new Timer. The provided Observer is used to observe a -// duration in seconds. Timer is usually used to time a function call in the -// following way: -// func TimeMe() { -// timer := NewTimer(myHistogram) -// defer timer.ObserveDuration() -// // Do actual work. -// } -func NewTimer(o Observer) *Timer { - return &Timer{ - begin: time.Now(), - observer: o, - } -} - -// ObserveDuration records the duration passed since the Timer was created with -// NewTimer. It calls the Observe method of the Observer provided during -// construction with the duration in seconds as an argument. ObserveDuration is -// usually called with a defer statement. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. 
-func (t *Timer) ObserveDuration() { - if t.observer != nil { - t.observer.Observe(time.Since(t.begin).Seconds()) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer_test.go b/vendor/github.com/prometheus/client_golang/prometheus/timer_test.go deleted file mode 100644 index 29490206866..00000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/timer_test.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "testing" - - dto "github.com/prometheus/client_model/go" -) - -func TestTimerObserve(t *testing.T) { - var ( - his = NewHistogram(HistogramOpts{Name: "test_histogram"}) - sum = NewSummary(SummaryOpts{Name: "test_summary"}) - gauge = NewGauge(GaugeOpts{Name: "test_gauge"}) - ) - - func() { - hisTimer := NewTimer(his) - sumTimer := NewTimer(sum) - gaugeTimer := NewTimer(ObserverFunc(gauge.Set)) - defer hisTimer.ObserveDuration() - defer sumTimer.ObserveDuration() - defer gaugeTimer.ObserveDuration() - }() - - m := &dto.Metric{} - his.Write(m) - if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { - t.Errorf("want %d observations for histogram, got %d", want, got) - } - m.Reset() - sum.Write(m) - if want, got := uint64(1), m.GetSummary().GetSampleCount(); want != got { - t.Errorf("want %d observations for summary, got %d", want, got) - } - m.Reset() - gauge.Write(m) - if got := m.GetGauge().GetValue(); got <= 0 { - t.Errorf("want value > 0 for gauge, got %f", got) - } -} - -func TestTimerEmpty(t *testing.T) { - emptyTimer := NewTimer(nil) - emptyTimer.ObserveDuration() - // Do nothing, just demonstrate it works without panic. -} - -func TestTimerConditionalTiming(t *testing.T) { - var ( - his = NewHistogram(HistogramOpts{ - Name: "test_histogram", - }) - timeMe = true - m = &dto.Metric{} - ) - - timedFunc := func() { - timer := NewTimer(ObserverFunc(func(v float64) { - if timeMe { - his.Observe(v) - } - })) - defer timer.ObserveDuration() - } - - timedFunc() // This will time. - his.Write(m) - if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { - t.Errorf("want %d observations for histogram, got %d", want, got) - } - - timeMe = false - timedFunc() // This will not time again. 
- m.Reset() - his.Write(m) - if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { - t.Errorf("want %d observations for histogram, got %d", want, got) - } -} - -func TestTimerByOutcome(t *testing.T) { - var ( - his = NewHistogramVec( - HistogramOpts{Name: "test_histogram"}, - []string{"outcome"}, - ) - outcome = "foo" - m = &dto.Metric{} - ) - - timedFunc := func() { - timer := NewTimer(ObserverFunc(func(v float64) { - his.WithLabelValues(outcome).Observe(v) - })) - defer timer.ObserveDuration() - - if outcome == "foo" { - outcome = "bar" - return - } - outcome = "foo" - } - - timedFunc() - his.WithLabelValues("foo").(Histogram).Write(m) - if want, got := uint64(0), m.GetHistogram().GetSampleCount(); want != got { - t.Errorf("want %d observations for 'foo' histogram, got %d", want, got) - } - m.Reset() - his.WithLabelValues("bar").(Histogram).Write(m) - if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { - t.Errorf("want %d observations for 'bar' histogram, got %d", want, got) - } - - timedFunc() - m.Reset() - his.WithLabelValues("foo").(Histogram).Write(m) - if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { - t.Errorf("want %d observations for 'foo' histogram, got %d", want, got) - } - m.Reset() - his.WithLabelValues("bar").(Histogram).Write(m) - if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { - t.Errorf("want %d observations for 'bar' histogram, got %d", want, got) - } - - timedFunc() - m.Reset() - his.WithLabelValues("foo").(Histogram).Write(m) - if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { - t.Errorf("want %d observations for 'foo' histogram, got %d", want, got) - } - m.Reset() - his.WithLabelValues("bar").(Histogram).Write(m) - if want, got := uint64(2), m.GetHistogram().GetSampleCount(); want != got { - t.Errorf("want %d observations for 'bar' histogram, got %d", want, got) - } - -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go index 0f9ce63f409..c65ab1c5312 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go @@ -13,12 +13,115 @@ package prometheus +import "hash/fnv" + +// Untyped is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// An Untyped metric works the same as a Gauge. The only difference is that to +// no type information is implied. +// +// To create Untyped instances, use NewUntyped. +type Untyped interface { + Metric + Collector + + // Set sets the Untyped metric to an arbitrary value. + Set(float64) + // Inc increments the Untyped metric by 1. + Inc() + // Dec decrements the Untyped metric by 1. + Dec() + // Add adds the given value to the Untyped metric. (The value can be + // negative, resulting in a decrease.) + Add(float64) + // Sub subtracts the given value from the Untyped metric. (The value can + // be negative, resulting in an increase.) + Sub(float64) +} + // UntypedOpts is an alias for Opts. See there for doc comments. type UntypedOpts Opts -// UntypedFunc works like GaugeFunc but the collected metric is of type -// "Untyped". UntypedFunc is useful to mirror an external metric of unknown -// type. +// NewUntyped creates a new Untyped metric from the provided UntypedOpts. 
+func NewUntyped(opts UntypedOpts) Untyped { + return newValue(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, 0) +} + +// UntypedVec is a Collector that bundles a set of Untyped metrics that all +// share the same Desc, but have different values for their variable +// labels. This is used if you want to count the same thing partitioned by +// various dimensions. Create instances with NewUntypedVec. +type UntypedVec struct { + MetricVec +} + +// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &UntypedVec{ + MetricVec: MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + hash: fnv.New64a(), + newMetric: func(lvs ...string) Metric { + return newValue(desc, UntypedValue, 0, lvs...) + }, + }, + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns an Untyped and not a +// Metric so that no type conversion is required. +func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Untyped), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns an Untyped and not a Metric so that no +// type conversion is required. +func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Untyped), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped { + return m.MetricVec.WithLabelValues(lvs...).(Untyped) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *UntypedVec) With(labels Labels) Untyped { + return m.MetricVec.With(labels).(Untyped) +} + +// UntypedFunc is an Untyped whose value is determined at collect time by +// calling a provided function. // // To create UntypedFunc instances, use NewUntypedFunc. type UntypedFunc interface { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index 4a9cca6669b..b54ac11e888 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -14,11 +14,11 @@ package prometheus import ( + "errors" "fmt" "math" "sort" "sync/atomic" - "time" dto "github.com/prometheus/client_model/go" @@ -36,17 +36,19 @@ const ( UntypedValue ) +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + // value is a generic metric for simple values. It implements Metric, Collector, // Counter, Gauge, and Untyped. 
Its effective type is determined by // ValueType. This is a low-level building block used by the library to back the // implementations of Counter, Gauge, and Untyped. type value struct { - // valBits contains the bits of the represented float64 value. It has + // valBits containst the bits of the represented float64 value. It has // to go first in the struct to guarantee alignment for atomic // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG valBits uint64 - selfCollector + SelfCollector desc *Desc valType ValueType @@ -66,7 +68,7 @@ func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...strin valBits: math.Float64bits(val), labelPairs: makeLabelPairs(desc, labelValues), } - result.init(result) + result.Init(result) return result } @@ -78,10 +80,6 @@ func (v *value) Set(val float64) { atomic.StoreUint64(&v.valBits, math.Float64bits(val)) } -func (v *value) SetToCurrentTime() { - v.Set(float64(time.Now().UnixNano()) / 1e9) -} - func (v *value) Inc() { v.Add(1) } @@ -115,7 +113,7 @@ func (v *value) Write(out *dto.Metric) error { // library to back the implementations of CounterFunc, GaugeFunc, and // UntypedFunc. type valueFunc struct { - selfCollector + SelfCollector desc *Desc valType ValueType @@ -136,7 +134,7 @@ func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *val function: function, labelPairs: makeLabelPairs(desc, nil), } - result.init(result) + result.Init(result) return result } @@ -155,8 +153,8 @@ func (v *valueFunc) Write(out *dto.Metric) error { // the Collect method. NewConstMetric returns an error if the length of // labelValues is not consistent with the variable labels in Desc. func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality } return &constMetric{ desc: desc, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value_test.go b/vendor/github.com/prometheus/client_golang/prometheus/value_test.go deleted file mode 100644 index eed517e7b01..00000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/value_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package prometheus - -import ( - "fmt" - "testing" -) - -func TestNewConstMetricInvalidLabelValues(t *testing.T) { - testCases := []struct { - desc string - labels Labels - }{ - { - desc: "non utf8 label value", - labels: Labels{"a": "\xFF"}, - }, - { - desc: "not enough label values", - labels: Labels{}, - }, - { - desc: "too many label values", - labels: Labels{"a": "1", "b": "2"}, - }, - } - - for _, test := range testCases { - metricDesc := NewDesc( - "sample_value", - "sample value", - []string{"a"}, - Labels{}, - ) - - expectPanic(t, func() { - MustNewConstMetric(metricDesc, CounterValue, 0.3, "\xFF") - }, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc)) - - if _, err := NewConstMetric(metricDesc, CounterValue, 0.3, "\xFF"); err == nil { - t.Errorf("NewConstMetric: expected error because: %s", test.desc) - } - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 65d13fe1ef0..a1f3bdf37d8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -14,91 +14,125 @@ package prometheus import ( + "bytes" 
"fmt" + "hash" "sync" - - "github.com/prometheus/common/model" ) -// metricVec is a Collector to bundle metrics of the same name that differ in -// their label values. metricVec is not used directly (and therefore -// unexported). It is used as a building block for implementations of vectors of -// a given metric type, like GaugeVec, CounterVec, SummaryVec, HistogramVec, and -// UntypedVec. -type metricVec struct { - mtx sync.RWMutex // Protects the children. - children map[uint64][]metricWithLabelValues +// MetricVec is a Collector to bundle metrics of the same name that +// differ in their label values. MetricVec is usually not used directly but as a +// building block for implementations of vectors of a given metric +// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already +// provided in this package. +type MetricVec struct { + mtx sync.RWMutex // Protects not only children, but also hash and buf. + children map[uint64]Metric desc *Desc - newMetric func(labelValues ...string) Metric - hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling - hashAddByte func(h uint64, b byte) uint64 -} + // hash is our own hash instance to avoid repeated allocations. + hash hash.Hash64 + // buf is used to copy string contents into it for hashing, + // again to avoid allocations. + buf bytes.Buffer -// newMetricVec returns an initialized metricVec. -func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec { - return &metricVec{ - children: map[uint64][]metricWithLabelValues{}, - desc: desc, - newMetric: newMetric, - hashAdd: hashAdd, - hashAddByte: hashAddByte, - } -} - -// metricWithLabelValues provides the metric and its label values for -// disambiguation on hash collision. -type metricWithLabelValues struct { - values []string - metric Metric + newMetric func(labelValues ...string) Metric } // Describe implements Collector. The length of the returned slice // is always one. -func (m *metricVec) Describe(ch chan<- *Desc) { +func (m *MetricVec) Describe(ch chan<- *Desc) { ch <- m.desc } // Collect implements Collector. -func (m *metricVec) Collect(ch chan<- Metric) { +func (m *MetricVec) Collect(ch chan<- Metric) { m.mtx.RLock() defer m.mtx.RUnlock() - for _, metrics := range m.children { - for _, metric := range metrics { - ch <- metric.metric - } + for _, metric := range m.children { + ch <- metric } } -func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { +// GetMetricWithLabelValues returns the Metric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Metric is created. +// +// It is possible to call this method without using the returned Metric to only +// create the new Metric but leave it at its start value (e.g. a Summary or +// Histogram without any observations). See also the SummaryVec example. +// +// Keeping the Metric for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Metric from the MetricVec. In that case, the +// Metric will still exist, but it will not be exported anymore, even if a +// Metric with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc. 
+// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + h, err := m.hashLabelValues(lvs) if err != nil { return nil, err } - - return m.getOrCreateMetricWithLabelValues(h, lvs), nil + return m.getOrCreateMetric(h, lvs...), nil } -func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { +// GetMetricWith returns the Metric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Metric is created. Implications of +// creating a Metric without using it and keeping the Metric for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + h, err := m.hashLabels(labels) if err != nil { return nil, err } - - return m.getOrCreateMetricWithLabels(h, labels), nil + lvs := make([]string, len(labels)) + for i, label := range m.desc.variableLabels { + lvs[i] = labels[label] + } + return m.getOrCreateMetric(h, lvs...), nil } -func (m *metricVec) withLabelValues(lvs ...string) Metric { - metric, err := m.getMetricWithLabelValues(lvs...) +// WithLabelValues works as GetMetricWithLabelValues, but panics if an error +// occurs. The method allows neat syntax like: +// httpReqs.WithLabelValues("404", "POST").Inc() +func (m *MetricVec) WithLabelValues(lvs ...string) Metric { + metric, err := m.GetMetricWithLabelValues(lvs...) if err != nil { panic(err) } return metric } -func (m *metricVec) with(labels Labels) Metric { - metric, err := m.getMetricWith(labels) +// With works as GetMetricWith, but panics if an error occurs. The method allows +// neat syntax like: +// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc() +func (m *MetricVec) With(labels Labels) Metric { + metric, err := m.GetMetricWith(labels) if err != nil { panic(err) } @@ -110,8 +144,8 @@ func (m *metricVec) with(labels Labels) Metric { // returns true if a metric was deleted. // // It is not an error if the number of label values is not the same as the -// number of VariableLabels in Desc. However, such inconsistent label count can -// never match an actual metric, so the method will always return false in that +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual Metric, so the method will always return false in that // case. // // Note that for more than one label value, this method is prone to mistakes @@ -120,7 +154,7 @@ func (m *metricVec) with(labels Labels) Metric { // latter has a much more readable (albeit more verbose) syntax, but it comes // with a performance overhead (for creating and processing the Labels map). // See also the CounterVec example. 
-func (m *metricVec) DeleteLabelValues(lvs ...string) bool { +func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { m.mtx.Lock() defer m.mtx.Unlock() @@ -128,20 +162,24 @@ func (m *metricVec) DeleteLabelValues(lvs ...string) bool { if err != nil { return false } - return m.deleteByHashWithLabelValues(h, lvs) + if _, has := m.children[h]; !has { + return false + } + delete(m.children, h) + return true } // Delete deletes the metric where the variable labels are the same as those // passed in as labels. It returns true if a metric was deleted. // // It is not an error if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc. However, such inconsistent Labels -// can never match an actual metric, so the method will always return false in -// that case. +// with those of the VariableLabels in the Desc of the MetricVec. However, such +// inconsistent Labels can never match an actual Metric, so the method will +// always return false in that case. // // This method is used for the same purpose as DeleteLabelValues(...string). See // there for pros and cons of the two methods. -func (m *metricVec) Delete(labels Labels) bool { +func (m *MetricVec) Delete(labels Labels) bool { m.mtx.Lock() defer m.mtx.Unlock() @@ -149,55 +187,15 @@ func (m *metricVec) Delete(labels Labels) bool { if err != nil { return false } - - return m.deleteByHashWithLabels(h, labels) -} - -// deleteByHashWithLabelValues removes the metric from the hash bucket h. If -// there are multiple matches in the bucket, use lvs to select a metric and -// remove only that metric. -func (m *metricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool { - metrics, ok := m.children[h] - if !ok { + if _, has := m.children[h]; !has { return false } - - i := m.findMetricWithLabelValues(metrics, lvs) - if i >= len(metrics) { - return false - } - - if len(metrics) > 1 { - m.children[h] = append(metrics[:i], metrics[i+1:]...) - } else { - delete(m.children, h) - } - return true -} - -// deleteByHashWithLabels removes the metric from the hash bucket h. If there -// are multiple matches in the bucket, use lvs to select a metric and remove -// only that metric. -func (m *metricVec) deleteByHashWithLabels(h uint64, labels Labels) bool { - metrics, ok := m.children[h] - if !ok { - return false - } - i := m.findMetricWithLabels(metrics, labels) - if i >= len(metrics) { - return false - } - - if len(metrics) > 1 { - m.children[h] = append(metrics[:i], metrics[i+1:]...) - } else { - delete(m.children, h) - } + delete(m.children, h) return true } // Reset deletes all metrics in this vector. 
-func (m *metricVec) Reset() { +func (m *MetricVec) Reset() { m.mtx.Lock() defer m.mtx.Unlock() @@ -206,158 +204,44 @@ func (m *metricVec) Reset() { } } -func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { - if err := validateLabelValues(vals, len(m.desc.variableLabels)); err != nil { - return 0, err +func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { + if len(vals) != len(m.desc.variableLabels) { + return 0, errInconsistentCardinality } - - h := hashNew() + m.hash.Reset() for _, val := range vals { - h = m.hashAdd(h, val) - h = m.hashAddByte(h, model.SeparatorByte) + m.buf.Reset() + m.buf.WriteString(val) + m.hash.Write(m.buf.Bytes()) } - return h, nil + return m.hash.Sum64(), nil } -func (m *metricVec) hashLabels(labels Labels) (uint64, error) { - if err := validateValuesInLabels(labels, len(m.desc.variableLabels)); err != nil { - return 0, err +func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { + if len(labels) != len(m.desc.variableLabels) { + return 0, errInconsistentCardinality } - - h := hashNew() + m.hash.Reset() for _, label := range m.desc.variableLabels { val, ok := labels[label] if !ok { return 0, fmt.Errorf("label name %q missing in label map", label) } - h = m.hashAdd(h, val) - h = m.hashAddByte(h, model.SeparatorByte) + m.buf.Reset() + m.buf.WriteString(val) + m.hash.Write(m.buf.Bytes()) } - return h, nil + return m.hash.Sum64(), nil } -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value -// or creates it and returns the new one. -// -// This function holds the mutex. -func (m *metricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs) +func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric { + metric, ok := m.children[hash] if !ok { - // Copy to avoid allocation in case wo don't go down this code path. - copiedLVs := make([]string, len(lvs)) - copy(copiedLVs, lvs) - metric = m.newMetric(copiedLVs...) - m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric}) + // Copy labelValues. Otherwise, they would be allocated even if we don't go + // down this code path. + copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...) + metric = m.newMetric(copiedLabelValues...) + m.children[hash] = metric } return metric } - -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value -// or creates it and returns the new one. -// -// This function holds the mutex. -func (m *metricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithHashAndLabels(hash, labels) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithHashAndLabels(hash, labels) - if !ok { - lvs := m.extractLabelValues(labels) - metric = m.newMetric(lvs...) - m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric}) - } - return metric -} - -// getMetricWithHashAndLabelValues gets a metric while handling possible -// collisions in the hash space. Must be called while holding the read mutex. 
-func (m *metricVec) getMetricWithHashAndLabelValues(h uint64, lvs []string) (Metric, bool) { - metrics, ok := m.children[h] - if ok { - if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// getMetricWithHashAndLabels gets a metric while handling possible collisions in -// the hash space. Must be called while holding read mutex. -func (m *metricVec) getMetricWithHashAndLabels(h uint64, labels Labels) (Metric, bool) { - metrics, ok := m.children[h] - if ok { - if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// findMetricWithLabelValues returns the index of the matching metric or -// len(metrics) if not found. -func (m *metricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int { - for i, metric := range metrics { - if m.matchLabelValues(metric.values, lvs) { - return i - } - } - return len(metrics) -} - -// findMetricWithLabels returns the index of the matching metric or len(metrics) -// if not found. -func (m *metricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int { - for i, metric := range metrics { - if m.matchLabels(metric.values, labels) { - return i - } - } - return len(metrics) -} - -func (m *metricVec) matchLabelValues(values []string, lvs []string) bool { - if len(values) != len(lvs) { - return false - } - for i, v := range values { - if v != lvs[i] { - return false - } - } - return true -} - -func (m *metricVec) matchLabels(values []string, labels Labels) bool { - if len(labels) != len(values) { - return false - } - for i, k := range m.desc.variableLabels { - if values[i] != labels[k] { - return false - } - } - return true -} - -func (m *metricVec) extractLabelValues(labels Labels) []string { - labelValues := make([]string, len(labels)) - for i, k := range m.desc.variableLabels { - labelValues[i] = labels[k] - } - return labelValues -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go b/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go index f767f7a7908..0e9431e656f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go @@ -14,42 +14,26 @@ package prometheus import ( - "fmt" + "hash/fnv" "testing" - - dto "github.com/prometheus/client_model/go" ) func TestDelete(t *testing.T) { - vec := NewGaugeVec( - GaugeOpts{ - Name: "test", - Help: "helpless", + desc := NewDesc("test", "helpless", []string{"l1", "l2"}, nil) + vec := MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + hash: fnv.New64a(), + newMetric: func(lvs ...string) Metric { + return newValue(desc, UntypedValue, 0, lvs...) 
}, - []string{"l1", "l2"}, - ) - testDelete(t, vec) -} - -func TestDeleteWithCollisions(t *testing.T) { - vec := NewGaugeVec( - GaugeOpts{ - Name: "test", - Help: "helpless", - }, - []string{"l1", "l2"}, - ) - vec.hashAdd = func(h uint64, s string) uint64 { return 1 } - vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } - testDelete(t, vec) -} + } -func testDelete(t *testing.T, vec *GaugeVec) { if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want { t.Errorf("got %v, want %v", got, want) } - vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want { t.Errorf("got %v, want %v", got, want) } @@ -57,7 +41,7 @@ func testDelete(t *testing.T, vec *GaugeVec) { t.Errorf("got %v, want %v", got, want) } - vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want { t.Errorf("got %v, want %v", got, want) } @@ -65,7 +49,7 @@ func testDelete(t *testing.T, vec *GaugeVec) { t.Errorf("got %v, want %v", got, want) } - vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want { t.Errorf("got %v, want %v", got, want) } @@ -75,48 +59,29 @@ func testDelete(t *testing.T, vec *GaugeVec) { } func TestDeleteLabelValues(t *testing.T) { - vec := NewGaugeVec( - GaugeOpts{ - Name: "test", - Help: "helpless", - }, - []string{"l1", "l2"}, - ) - testDeleteLabelValues(t, vec) -} - -func TestDeleteLabelValuesWithCollisions(t *testing.T) { - vec := NewGaugeVec( - GaugeOpts{ - Name: "test", - Help: "helpless", + desc := NewDesc("test", "helpless", []string{"l1", "l2"}, nil) + vec := MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + hash: fnv.New64a(), + newMetric: func(lvs ...string) Metric { + return newValue(desc, UntypedValue, 0, lvs...) }, - []string{"l1", "l2"}, - ) - vec.hashAdd = func(h uint64, s string) uint64 { return 1 } - vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } - testDeleteLabelValues(t, vec) -} + } -func testDeleteLabelValues(t *testing.T, vec *GaugeVec) { if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want { t.Errorf("got %v, want %v", got, want) } - vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) - vec.With(Labels{"l1": "v1", "l2": "v3"}).(Gauge).Set(42) // Add junk data for collision. + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want { t.Errorf("got %v, want %v", got, want) } if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want { t.Errorf("got %v, want %v", got, want) } - if got, want := vec.DeleteLabelValues("v1", "v3"), true; got != want { - t.Errorf("got %v, want %v", got, want) - } - vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) - // Delete out of order. 
+ vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want { t.Errorf("got %v, want %v", got, want) } @@ -124,189 +89,3 @@ func testDeleteLabelValues(t *testing.T, vec *GaugeVec) { t.Errorf("got %v, want %v", got, want) } } - -func TestMetricVec(t *testing.T) { - vec := NewGaugeVec( - GaugeOpts{ - Name: "test", - Help: "helpless", - }, - []string{"l1", "l2"}, - ) - testMetricVec(t, vec) -} - -func TestMetricVecWithCollisions(t *testing.T) { - vec := NewGaugeVec( - GaugeOpts{ - Name: "test", - Help: "helpless", - }, - []string{"l1", "l2"}, - ) - vec.hashAdd = func(h uint64, s string) uint64 { return 1 } - vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } - testMetricVec(t, vec) -} - -func testMetricVec(t *testing.T, vec *GaugeVec) { - vec.Reset() // Actually test Reset now! - - var pair [2]string - // Keep track of metrics. - expected := map[[2]string]int{} - - for i := 0; i < 1000; i++ { - pair[0], pair[1] = fmt.Sprint(i%4), fmt.Sprint(i%5) // Varying combinations multiples. - expected[pair]++ - vec.WithLabelValues(pair[0], pair[1]).Inc() - - expected[[2]string{"v1", "v2"}]++ - vec.WithLabelValues("v1", "v2").(Gauge).Inc() - } - - var total int - for _, metrics := range vec.children { - for _, metric := range metrics { - total++ - copy(pair[:], metric.values) - - var metricOut dto.Metric - if err := metric.metric.Write(&metricOut); err != nil { - t.Fatal(err) - } - actual := *metricOut.Gauge.Value - - var actualPair [2]string - for i, label := range metricOut.Label { - actualPair[i] = *label.Value - } - - // Test output pair against metric.values to ensure we've selected - // the right one. We check this to ensure the below check means - // anything at all. - if actualPair != pair { - t.Fatalf("unexpected pair association in metric map: %v != %v", actualPair, pair) - } - - if actual != float64(expected[pair]) { - t.Fatalf("incorrect counter value for %v: %v != %v", pair, actual, expected[pair]) - } - } - } - - if total != len(expected) { - t.Fatalf("unexpected number of metrics: %v != %v", total, len(expected)) - } - - vec.Reset() - - if len(vec.children) > 0 { - t.Fatalf("reset failed") - } -} - -func TestCounterVecEndToEndWithCollision(t *testing.T) { - vec := NewCounterVec( - CounterOpts{ - Name: "test", - Help: "helpless", - }, - []string{"labelname"}, - ) - vec.WithLabelValues("77kepQFQ8Kl").Inc() - vec.WithLabelValues("!0IC=VloaY").Add(2) - - m := &dto.Metric{} - if err := vec.WithLabelValues("77kepQFQ8Kl").Write(m); err != nil { - t.Fatal(err) - } - if got, want := m.GetLabel()[0].GetValue(), "77kepQFQ8Kl"; got != want { - t.Errorf("got label value %q, want %q", got, want) - } - if got, want := m.GetCounter().GetValue(), 1.; got != want { - t.Errorf("got value %f, want %f", got, want) - } - m.Reset() - if err := vec.WithLabelValues("!0IC=VloaY").Write(m); err != nil { - t.Fatal(err) - } - if got, want := m.GetLabel()[0].GetValue(), "!0IC=VloaY"; got != want { - t.Errorf("got label value %q, want %q", got, want) - } - if got, want := m.GetCounter().GetValue(), 2.; got != want { - t.Errorf("got value %f, want %f", got, want) - } -} - -func BenchmarkMetricVecWithLabelValuesBasic(b *testing.B) { - benchmarkMetricVecWithLabelValues(b, map[string][]string{ - "l1": {"onevalue"}, - "l2": {"twovalue"}, - }) -} - -func BenchmarkMetricVecWithLabelValues2Keys10ValueCardinality(b *testing.B) { - benchmarkMetricVecWithLabelValuesCardinality(b, 2, 10) -} - -func 
BenchmarkMetricVecWithLabelValues4Keys10ValueCardinality(b *testing.B) { - benchmarkMetricVecWithLabelValuesCardinality(b, 4, 10) -} - -func BenchmarkMetricVecWithLabelValues2Keys100ValueCardinality(b *testing.B) { - benchmarkMetricVecWithLabelValuesCardinality(b, 2, 100) -} - -func BenchmarkMetricVecWithLabelValues10Keys100ValueCardinality(b *testing.B) { - benchmarkMetricVecWithLabelValuesCardinality(b, 10, 100) -} - -func BenchmarkMetricVecWithLabelValues10Keys1000ValueCardinality(b *testing.B) { - benchmarkMetricVecWithLabelValuesCardinality(b, 10, 1000) -} - -func benchmarkMetricVecWithLabelValuesCardinality(b *testing.B, nkeys, nvalues int) { - labels := map[string][]string{} - - for i := 0; i < nkeys; i++ { - var ( - k = fmt.Sprintf("key-%v", i) - vs = make([]string, 0, nvalues) - ) - for j := 0; j < nvalues; j++ { - vs = append(vs, fmt.Sprintf("value-%v", j)) - } - labels[k] = vs - } - - benchmarkMetricVecWithLabelValues(b, labels) -} - -func benchmarkMetricVecWithLabelValues(b *testing.B, labels map[string][]string) { - var keys []string - for k := range labels { // Map order dependent, who cares though. - keys = append(keys, k) - } - - values := make([]string, len(labels)) // Value cache for permutations. - vec := NewGaugeVec( - GaugeOpts{ - Name: "test", - Help: "helpless", - }, - keys, - ) - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - // Varies input across provide map entries based on key size. - for j, k := range keys { - candidates := labels[k] - values[j] = candidates[i%len(candidates)] - } - - vec.WithLabelValues(values...) - } -} diff --git a/vendor/github.com/spf13/viper/.gitignore b/vendor/github.com/spf13/viper/.gitignore index 352a34a566c..836562412fe 100644 --- a/vendor/github.com/spf13/viper/.gitignore +++ b/vendor/github.com/spf13/viper/.gitignore @@ -21,4 +21,3 @@ _testmain.go *.exe *.test -*.bench \ No newline at end of file diff --git a/vendor/github.com/spf13/viper/.travis.yml b/vendor/github.com/spf13/viper/.travis.yml index f1deac3d74f..e793edbab85 100644 --- a/vendor/github.com/spf13/viper/.travis.yml +++ b/vendor/github.com/spf13/viper/.travis.yml @@ -2,8 +2,9 @@ go_import_path: github.com/spf13/viper language: go go: - - 1.7.5 - - 1.8 + - 1.5.4 + - 1.6.3 + - 1.7 - tip os: @@ -17,7 +18,6 @@ matrix: script: - go install ./... - - diff -u <(echo -n) <(gofmt -d .) - go test -v ./... 
after_success: diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md index 848d92d6b87..4ebd8ddb815 100644 --- a/vendor/github.com/spf13/viper/README.md +++ b/vendor/github.com/spf13/viper/README.md @@ -6,19 +6,18 @@ Many Go projects are built using Viper including: * [Hugo](http://gohugo.io) * [EMC RexRay](http://rexray.readthedocs.org/en/stable/) -* [Imgur’s Incus](https://github.com/Imgur/incus) +* [Imgur's Incus](https://github.com/Imgur/incus) * [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) * [Docker Notary](https://github.com/docker/Notary) * [BloomApi](https://www.bloomapi.com/) -* [doctl](https://github.com/digitalocean/doctl) -* [Clairctl](https://github.com/jgsqware/clairctl) +* [doctl(https://github.com/digitalocean/doctl) -[![Build Status](https://travis-ci.org/spf13/viper.svg)](https://travis-ci.org/spf13/viper) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![GoDoc](https://godoc.org/github.com/spf13/viper?status.svg)](https://godoc.org/github.com/spf13/viper) + [![Build Status](https://travis-ci.org/spf13/viper.svg)](https://travis-ci.org/spf13/viper) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) ## What is Viper? -Viper is a complete configuration solution for Go applications including 12-Factor apps. It is designed +Viper is a complete configuration solution for go applications including 12 factor apps. It is designed to work within an application, and can handle all types of configuration needs and formats. It supports: @@ -69,7 +68,7 @@ Viper configuration keys are case insensitive. ### Establishing Defaults A good configuration system will support default values. A default value is not -required for a key, but it’s useful in the event that a key hasn’t been set via +required for a key, but it's useful in the event that a key hasn’t been set via config file, environment variable, remote configuration or flag. Examples: @@ -111,16 +110,16 @@ Gone are the days of needing to restart a server to have a config take effect, viper powered applications can read an update to a config file while running and not miss a beat. -Simply tell the viper instance to watchConfig. +Simply tell the viper instance to watchConfig. Optionally you can provide a function for Viper to run each time a change occurs. **Make sure you add all of the configPaths prior to calling `WatchConfig()`** ```go -viper.WatchConfig() -viper.OnConfigChange(func(e fsnotify.Event) { - fmt.Println("Config file changed:", e.Name) -}) + viper.WatchConfig() + viper.OnConfigChange(func(e fsnotify.Event) { + fmt.Println("Config file changed:", e.Name) + }) ``` ### Reading Config from io.Reader @@ -237,7 +236,7 @@ Like `BindEnv`, the value is not set when the binding method is called, but when it is accessed. This means you can bind as early as you want, even in an `init()` function. -For individual flags, the `BindPFlag()` method provides this functionality. +The `BindPFlag()` method provides this functionality. 
Example: @@ -246,19 +245,6 @@ serverCmd.Flags().Int("port", 1138, "Port to run Application server on") viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) ``` -You can also bind an existing set of pflags (pflag.FlagSet): - -Example: - -```go -pflag.Int("flagname", 1234, "help message for flagname") - -pflag.Parse() -viper.BindPFlags(pflag.CommandLine) - -i := viper.GetInt("flagname") // retrieve values from viper instead of pflag -``` - The use of [pflag](https://github.com/spf13/pflag/) in Viper does not preclude the use of other packages that use the [flag](https://golang.org/pkg/flag/) package from the standard library. The pflag package can handle the flags @@ -277,32 +263,24 @@ import ( ) func main() { - - // using standard library "flag" package - flag.Int("flagname", 1234, "help message for flagname") - pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() - viper.BindPFlags(pflag.CommandLine) - - i := viper.GetInt("flagname") // retrieve value from viper - - ... + ... } ``` #### Flag interfaces -Viper provides two Go interfaces to bind other flag systems if you don’t use `Pflags`. +Viper provides two Go interfaces to bind other flag systems if you don't use `Pflags`. `FlagValue` represents a single flag. This is a very simple example on how to implement this interface: ```go type myFlag struct {} -func (f myFlag) HasChanged() bool { return false } -func (f myFlag) Name() string { return "my-flag-name" } -func (f myFlag) ValueString() string { return "my-flag-value" } -func (f myFlag) ValueType() string { return "string" } +func (f myFlag) IsChanged() { return false } +func (f myFlag) Name() { return "my-flag-name" } +func (f myFlag) ValueString() { return "my-flag-value" } +func (f myFlag) ValueType() { return "string" } ``` Once your flag implements this interface, you can simply tell Viper to bind it: @@ -320,7 +298,7 @@ type myFlagSet struct { func (f myFlagSet) VisitAll(fn func(FlagValue)) { for _, flag := range flags { - fn(flag) + fn(flag) } } ``` @@ -423,7 +401,7 @@ go func(){ ## Getting Values From Viper -In Viper, there are a few ways to get a value depending on the value’s type. +In Viper, there are a few ways to get a value depending on the value's type. The following functions and methods exist: * `Get(key string) : interface{}` @@ -480,17 +458,16 @@ Viper can access a nested field by passing a `.` delimited path of keys: GetString("datastore.metric.host") // (returns "127.0.0.1") ``` -This obeys the precedence rules established above; the search for the path -will cascade through the remaining configuration registries until found. +This obeys the precedence rules established above; the search for the root key +(in this example, `datastore`) will cascade through the remaining configuration +registries until found. The search for the sub-keys (`metric` and `host`), +however, will not. -For example, given this configuration file, both `datastore.metric.host` and -`datastore.metric.port` are already defined (and may be overridden). If in addition -`datastore.metric.protocol` was defined in the defaults, Viper would also find it. +For example, if the `metric` key was not defined in the configuration loaded +from file, but was defined in the defaults, Viper would return the zero value. -However, if `datastore.metric` was overridden (by a flag, an environment variable, -the `Set()` method, …) with an immediate value, then all sub-keys of -`datastore.metric` become undefined, they are “shadowed” by the higher-priority -configuration level. 
+On the other hand, if the primary key was not defined, Viper would go through +the remaining registries looking for it. Lastly, if there exists a key that matches the delimited key path, its value will be returned instead. E.g. @@ -514,7 +491,7 @@ will be returned instead. E.g. } } -GetString("datastore.metric.host") // returns "0.0.0.0" +GetString("datastore.metric.host") //returns "0.0.0.0" ``` ### Extract sub-tree @@ -553,7 +530,7 @@ func NewCache(cfg *Viper) *Cache {...} ``` which creates a cache based on config information formatted as `subv`. -Now it’s easy to create these 2 caches separately as: +Now it's easy to create these 2 caches separately as: ```go cfg1 := viper.Sub("app.cache1") @@ -597,13 +574,13 @@ initialization needed to begin using Viper. Since most applications will want to use a single central repository for their configuration, the viper package provides this. It is similar to a singleton. -In all of the examples above, they demonstrate using viper in its singleton +In all of the examples above, they demonstrate using viper in it's singleton style approach. ### Working with multiple vipers You can also create many different vipers for use in your application. Each will -have its own unique set of configurations and values. Each can read from a +have it’s own unique set of configurations and values. Each can read from a different config file, key value store, etc. All of the functions that viper package supports are mirrored as methods on a viper. diff --git a/vendor/github.com/spf13/viper/flags_test.go b/vendor/github.com/spf13/viper/flags_test.go index 0b976b60523..5489278dbda 100644 --- a/vendor/github.com/spf13/viper/flags_test.go +++ b/vendor/github.com/spf13/viper/flags_test.go @@ -22,7 +22,7 @@ func TestBindFlagValueSet(t *testing.T) { "endpoint": "/public", } - for name := range testValues { + for name, _ := range testValues { testValues[name] = flagSet.String(name, "", "test") } @@ -62,4 +62,5 @@ func TestBindFlagValue(t *testing.T) { flag.Changed = true //hack for pflag usage assert.Equal(t, "testing_mutate", Get("testvalue")) + } diff --git a/vendor/github.com/spf13/viper/overrides_test.go b/vendor/github.com/spf13/viper/overrides_test.go deleted file mode 100644 index dd2aa9b0dbd..00000000000 --- a/vendor/github.com/spf13/viper/overrides_test.go +++ /dev/null @@ -1,173 +0,0 @@ -package viper - -import ( - "fmt" - "strings" - "testing" - - "github.com/spf13/cast" - "github.com/stretchr/testify/assert" -) - -type layer int - -const ( - defaultLayer layer = iota + 1 - overrideLayer -) - -func TestNestedOverrides(t *testing.T) { - assert := assert.New(t) - var v *Viper - - // Case 0: value overridden by a value - overrideDefault(assert, "tom", 10, "tom", 20) // "tom" is first given 10 as default value, then overridden by 20 - override(assert, "tom", 10, "tom", 20) // "tom" is first given value 10, then overridden by 20 - overrideDefault(assert, "tom.age", 10, "tom.age", 20) - override(assert, "tom.age", 10, "tom.age", 20) - overrideDefault(assert, "sawyer.tom.age", 10, "sawyer.tom.age", 20) - override(assert, "sawyer.tom.age", 10, "sawyer.tom.age", 20) - - // Case 1: key:value overridden by a value - v = overrideDefault(assert, "tom.age", 10, "tom", "boy") // "tom.age" is first given 10 as default value, then "tom" is overridden by "boy" - assert.Nil(v.Get("tom.age")) // "tom.age" should not exist anymore - v = override(assert, "tom.age", 10, "tom", "boy") - assert.Nil(v.Get("tom.age")) - - // Case 2: value overridden by a key:value - overrideDefault(assert, 
"tom", "boy", "tom.age", 10) // "tom" is first given "boy" as default value, then "tom" is overridden by map{"age":10} - override(assert, "tom.age", 10, "tom", "boy") - - // Case 3: key:value overridden by a key:value - v = overrideDefault(assert, "tom.size", 4, "tom.age", 10) - assert.Equal(4, v.Get("tom.size")) // value should still be reachable - v = override(assert, "tom.size", 4, "tom.age", 10) - assert.Equal(4, v.Get("tom.size")) - deepCheckValue(assert, v, overrideLayer, []string{"tom", "size"}, 4) - - // Case 4: key:value overridden by a map - v = overrideDefault(assert, "tom.size", 4, "tom", map[string]interface{}{"age": 10}) // "tom.size" is first given "4" as default value, then "tom" is overridden by map{"age":10} - assert.Equal(4, v.Get("tom.size")) // "tom.size" should still be reachable - assert.Equal(10, v.Get("tom.age")) // new value should be there - deepCheckValue(assert, v, overrideLayer, []string{"tom", "age"}, 10) // new value should be there - v = override(assert, "tom.size", 4, "tom", map[string]interface{}{"age": 10}) - assert.Nil(v.Get("tom.size")) - assert.Equal(10, v.Get("tom.age")) - deepCheckValue(assert, v, overrideLayer, []string{"tom", "age"}, 10) - - // Case 5: array overridden by a value - overrideDefault(assert, "tom", []int{10, 20}, "tom", 30) - override(assert, "tom", []int{10, 20}, "tom", 30) - overrideDefault(assert, "tom.age", []int{10, 20}, "tom.age", 30) - override(assert, "tom.age", []int{10, 20}, "tom.age", 30) - - // Case 6: array overridden by an array - overrideDefault(assert, "tom", []int{10, 20}, "tom", []int{30, 40}) - override(assert, "tom", []int{10, 20}, "tom", []int{30, 40}) - overrideDefault(assert, "tom.age", []int{10, 20}, "tom.age", []int{30, 40}) - v = override(assert, "tom.age", []int{10, 20}, "tom.age", []int{30, 40}) - // explicit array merge: - s, ok := v.Get("tom.age").([]int) - if assert.True(ok, "tom[\"age\"] is not a slice") { - v.Set("tom.age", append(s, []int{50, 60}...)) - assert.Equal([]int{30, 40, 50, 60}, v.Get("tom.age")) - deepCheckValue(assert, v, overrideLayer, []string{"tom", "age"}, []int{30, 40, 50, 60}) - } -} - -func overrideDefault(assert *assert.Assertions, firstPath string, firstValue interface{}, secondPath string, secondValue interface{}) *Viper { - return overrideFromLayer(defaultLayer, assert, firstPath, firstValue, secondPath, secondValue) -} -func override(assert *assert.Assertions, firstPath string, firstValue interface{}, secondPath string, secondValue interface{}) *Viper { - return overrideFromLayer(overrideLayer, assert, firstPath, firstValue, secondPath, secondValue) -} - -// overrideFromLayer performs the sequential override and low-level checks. -// -// First assignment is made on layer l for path firstPath with value firstValue, -// the second one on the override layer (i.e., with the Set() function) -// for path secondPath with value secondValue. -// -// firstPath and secondPath can include an arbitrary number of dots to indicate -// a nested element. -// -// After each assignment, the value is checked, retrieved both by its full path -// and by its key sequence (successive maps). 
-func overrideFromLayer(l layer, assert *assert.Assertions, firstPath string, firstValue interface{}, secondPath string, secondValue interface{}) *Viper { - v := New() - firstKeys := strings.Split(firstPath, v.keyDelim) - if assert == nil || - len(firstKeys) == 0 || len(firstKeys[0]) == 0 { - return v - } - - // Set and check first value - switch l { - case defaultLayer: - v.SetDefault(firstPath, firstValue) - case overrideLayer: - v.Set(firstPath, firstValue) - default: - return v - } - assert.Equal(firstValue, v.Get(firstPath)) - deepCheckValue(assert, v, l, firstKeys, firstValue) - - // Override and check new value - secondKeys := strings.Split(secondPath, v.keyDelim) - if len(secondKeys) == 0 || len(secondKeys[0]) == 0 { - return v - } - v.Set(secondPath, secondValue) - assert.Equal(secondValue, v.Get(secondPath)) - deepCheckValue(assert, v, overrideLayer, secondKeys, secondValue) - - return v -} - -// deepCheckValue checks that all given keys correspond to a valid path in the -// configuration map of the given layer, and that the final value equals the one given -func deepCheckValue(assert *assert.Assertions, v *Viper, l layer, keys []string, value interface{}) { - if assert == nil || v == nil || - len(keys) == 0 || len(keys[0]) == 0 { - return - } - - // init - var val interface{} - var ms string - switch l { - case defaultLayer: - val = v.defaults - ms = "v.defaults" - case overrideLayer: - val = v.override - ms = "v.override" - } - - // loop through map - var m map[string]interface{} - err := false - for _, k := range keys { - if val == nil { - assert.Fail(fmt.Sprintf("%s is not a map[string]interface{}", ms)) - return - } - - // deep scan of the map to get the final value - switch val.(type) { - case map[interface{}]interface{}: - m = cast.ToStringMap(val) - case map[string]interface{}: - m = val.(map[string]interface{}) - default: - assert.Fail(fmt.Sprintf("%s is not a map[string]interface{}", ms)) - return - } - ms = ms + "[\"" + k + "\"]" - val = m[k] - } - if !err { - assert.Equal(value, val) - } -} diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go index 3ebada91abd..fe6cb459463 100644 --- a/vendor/github.com/spf13/viper/util.go +++ b/vendor/github.com/spf13/viper/util.go @@ -29,68 +29,23 @@ import ( "gopkg.in/yaml.v2" ) -// ConfigParseError denotes failing to parse configuration file. +// Denotes failing to parse configuration file. type ConfigParseError struct { err error } -// Error returns the formatted configuration error. +// Returns the formatted configuration error. func (pe ConfigParseError) Error() string { return fmt.Sprintf("While parsing config: %s", pe.err.Error()) } -// toCaseInsensitiveValue checks if the value is a map; -// if so, create a copy and lower-case the keys recursively. -func toCaseInsensitiveValue(value interface{}) interface{} { - switch v := value.(type) { - case map[interface{}]interface{}: - value = copyAndInsensitiviseMap(cast.ToStringMap(v)) - case map[string]interface{}: - value = copyAndInsensitiviseMap(v) - } - - return value -} - -// copyAndInsensitiviseMap behaves like insensitiviseMap, but creates a copy of -// any map it makes case insensitive. 
-func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} { - nm := make(map[string]interface{}) - - for key, val := range m { - lkey := strings.ToLower(key) - switch v := val.(type) { - case map[interface{}]interface{}: - nm[lkey] = copyAndInsensitiviseMap(cast.ToStringMap(v)) - case map[string]interface{}: - nm[lkey] = copyAndInsensitiviseMap(v) - default: - nm[lkey] = v - } - } - - return nm -} - func insensitiviseMap(m map[string]interface{}) { for key, val := range m { - switch val.(type) { - case map[interface{}]interface{}: - // nested map: cast and recursively insensitivise - val = cast.ToStringMap(val) - insensitiviseMap(val.(map[string]interface{})) - case map[string]interface{}: - // nested map: recursively insensitivise - insensitiviseMap(val.(map[string]interface{})) - } - lower := strings.ToLower(key) if key != lower { - // remove old key (not lower-cased) delete(m, key) + m[lower] = val } - // update map - m[lower] = val } } @@ -113,10 +68,10 @@ func absPathify(inPath string) string { p, err := filepath.Abs(inPath) if err == nil { return filepath.Clean(p) + } else { + jww.ERROR.Println("Couldn't discover absolute path") + jww.ERROR.Println(err) } - - jww.ERROR.Println("Couldn't discover absolute path") - jww.ERROR.Println(err) return "" } @@ -152,6 +107,29 @@ func userHomeDir() string { return os.Getenv("HOME") } +func findCWD() (string, error) { + serverFile, err := filepath.Abs(os.Args[0]) + + if err != nil { + return "", fmt.Errorf("Can't get absolute path for executable: %v", err) + } + + path := filepath.Dir(serverFile) + realFile, err := filepath.EvalSymlinks(serverFile) + + if err != nil { + if _, err = os.Stat(serverFile + ".exe"); err == nil { + realFile = filepath.Clean(serverFile + ".exe") + } + } + + if err == nil && realFile != serverFile { + path = filepath.Dir(realFile) + } + + return path, nil +} + func unmarshallConfigReader(in io.Reader, c map[string]interface{}, configType string) error { buf := new(bytes.Buffer) buf.ReadFrom(in) @@ -194,12 +172,7 @@ func unmarshallConfigReader(in io.Reader, c map[string]interface{}, configType s } for _, key := range p.Keys() { value, _ := p.Get(key) - // recursively build nested maps - path := strings.Split(key, ".") - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(c, path[0:len(path)-1]) - // set innermost value - deepestMap[lastKey] = value + c[key] = value } } @@ -249,34 +222,3 @@ func parseSizeInBytes(sizeStr string) uint { return safeMul(uint(size), multiplier) } - -// deepSearch scans deep maps, following the key indexes listed in the -// sequence "path". -// The last value is expected to be another map, and is returned. -// -// In case intermediate keys do not exist, or map to a non-map value, -// a new map is created and inserted, and the search continues from there: -// the initial map "m" may be modified! 
-func deepSearch(m map[string]interface{}, path []string) map[string]interface{} { - for _, k := range path { - m2, ok := m[k] - if !ok { - // intermediate key does not exist - // => create it and continue from there - m3 := make(map[string]interface{}) - m[k] = m3 - m = m3 - continue - } - m3, ok := m2.(map[string]interface{}) - if !ok { - // intermediate key is a value - // => replace with a new map - m3 = make(map[string]interface{}) - m[k] = m3 - } - // continue search from here - m = m3 - } - return m -} diff --git a/vendor/github.com/spf13/viper/util_test.go b/vendor/github.com/spf13/viper/util_test.go deleted file mode 100644 index 0af80bb635b..00000000000 --- a/vendor/github.com/spf13/viper/util_test.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -// Viper is a application configuration system. -// It believes that applications can be configured a variety of ways -// via flags, ENVIRONMENT variables, configuration files retrieved -// from the file system, or a remote key/value store. - -package viper - -import ( - "reflect" - "testing" -) - -func TestCopyAndInsensitiviseMap(t *testing.T) { - var ( - given = map[string]interface{}{ - "Foo": 32, - "Bar": map[interface{}]interface { - }{ - "ABc": "A", - "cDE": "B"}, - } - expected = map[string]interface{}{ - "foo": 32, - "bar": map[string]interface { - }{ - "abc": "A", - "cde": "B"}, - } - ) - - got := copyAndInsensitiviseMap(given) - - if !reflect.DeepEqual(got, expected) { - t.Fatalf("Got %q\nexpected\n%q", got, expected) - } - - if _, ok := given["foo"]; ok { - t.Fatal("Input map changed") - } - - if _, ok := given["bar"]; ok { - t.Fatal("Input map changed") - } - - m := given["Bar"].(map[interface{}]interface{}) - if _, ok := m["ABc"]; !ok { - t.Fatal("Input map changed") - } -} diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index 2a221e53cb0..f17790e7607 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -21,7 +21,6 @@ package viper import ( "bytes" - "encoding/csv" "fmt" "io" "log" @@ -41,11 +40,6 @@ import ( var v *Viper -type RemoteResponse struct { - Value []byte - Error error -} - func init() { v = New() } @@ -53,45 +47,45 @@ func init() { type remoteConfigFactory interface { Get(rp RemoteProvider) (io.Reader, error) Watch(rp RemoteProvider) (io.Reader, error) - WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool) } // RemoteConfig is optional, see the remote package var RemoteConfig remoteConfigFactory -// UnsupportedConfigError denotes encountering an unsupported +// Denotes encountering an unsupported // configuration filetype. type UnsupportedConfigError string -// Error returns the formatted configuration error. +// Returns the formatted configuration error. func (str UnsupportedConfigError) Error() string { return fmt.Sprintf("Unsupported Config Type %q", string(str)) } -// UnsupportedRemoteProviderError denotes encountering an unsupported remote -// provider. Currently only etcd and Consul are supported. +// Denotes encountering an unsupported remote +// provider. Currently only etcd and Consul are +// supported. type UnsupportedRemoteProviderError string -// Error returns the formatted remote provider error. +// Returns the formatted remote provider error. 
func (str UnsupportedRemoteProviderError) Error() string { return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str)) } -// RemoteConfigError denotes encountering an error while trying to +// Denotes encountering an error while trying to // pull the configuration from the remote provider. type RemoteConfigError string -// Error returns the formatted remote provider error +// Returns the formatted remote provider error func (rce RemoteConfigError) Error() string { return fmt.Sprintf("Remote Configurations Error: %s", string(rce)) } -// ConfigFileNotFoundError denotes failing to find configuration file. +// Denotes failing to find configuration file. type ConfigFileNotFoundError struct { name, locations string } -// Error returns the formatted configuration error. +// Returns the formatted configuration error. func (fnfe ConfigFileNotFoundError) Error() string { return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations) } @@ -113,11 +107,11 @@ func (fnfe ConfigFileNotFoundError) Error() string { // Defaults : { // "secret": "", // "user": "default", -// "endpoint": "https://localhost" +// "endpoint": "https://localhost" // } // Config : { // "user": "root" -// "secret": "defaultsecret" +// "secret": "defaultsecret" // } // Env : { // "secret": "somesecretkey" @@ -165,7 +159,7 @@ type Viper struct { onConfigChange func(fsnotify.Event) } -// New returns an initialized Viper instance. +// Returns an initialized Viper instance. func New() *Viper { v := new(Viper) v.keyDelim = "." @@ -226,11 +220,11 @@ type RemoteProvider interface { SecretKeyring() string } -// SupportedExts are universally supported extensions. -var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl"} +// Universally supported extensions. +var SupportedExts []string = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl"} -// SupportedRemoteProviders are universally supported remote providers. -var SupportedRemoteProviders = []string{"etcd", "consul"} +// Universally supported remote providers. +var SupportedRemoteProviders []string = []string{"etcd", "consul"} func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) } func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) { @@ -247,13 +241,7 @@ func (v *Viper) WatchConfig() { defer watcher.Close() // we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way - filename, err := v.getConfigFile() - if err != nil { - log.Println("error:", err) - return - } - - configFile := filepath.Clean(filename) + configFile := filepath.Clean(v.getConfigFile()) configDir, _ := filepath.Split(configFile) done := make(chan bool) @@ -282,8 +270,8 @@ func (v *Viper) WatchConfig() { }() } -// SetConfigFile explicitly defines the path, name and extension of the config file. -// Viper will use this and not check any of the config paths. +// Explicitly define the path, name and extension of the config file +// Viper will use this and not check any of the config paths func SetConfigFile(in string) { v.SetConfigFile(in) } func (v *Viper) SetConfigFile(in string) { if in != "" { @@ -291,9 +279,9 @@ func (v *Viper) SetConfigFile(in string) { } } -// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use. -// E.g. if your prefix is "spf", the env registry will look for env -// variables that start with "SPF_". +// Define a prefix that ENVIRONMENT variables will use. +// E.g. if your prefix is "spf", the env registry +// will look for env. 
variables that start with "SPF_" func SetEnvPrefix(in string) { v.SetEnvPrefix(in) } func (v *Viper) SetEnvPrefix(in string) { if in != "" { @@ -311,11 +299,11 @@ func (v *Viper) mergeWithEnvPrefix(in string) string { // TODO: should getEnv logic be moved into find(). Can generalize the use of // rewriting keys many things, Ex: Get('someKey') -> some_key -// (camel case to snake case for JSON keys perhaps) +// (cammel case to snake case for JSON keys perhaps) -// getEnv is a wrapper around os.Getenv which replaces characters in the original -// key. This allows env vars which have different keys than the config object -// keys. +// getEnv s a wrapper around os.Getenv which replaces characters in the original +// key. This allows env vars which have different keys then the config object +// keys func (v *Viper) getEnv(key string) string { if v.envKeyReplacer != nil { key = v.envKeyReplacer.Replace(key) @@ -323,11 +311,11 @@ func (v *Viper) getEnv(key string) string { return os.Getenv(key) } -// ConfigFileUsed returns the file used to populate the config registry. +// Return the file used to populate the config registry func ConfigFileUsed() string { return v.ConfigFileUsed() } func (v *Viper) ConfigFileUsed() string { return v.configFile } -// AddConfigPath adds a path for Viper to search for the config file in. +// Add a path for Viper to search for the config file in. // Can be called multiple times to define multiple search paths. func AddConfigPath(in string) { v.AddConfigPath(in) } func (v *Viper) AddConfigPath(in string) { @@ -411,22 +399,23 @@ func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool { return false } -// searchMap recursively searches for a value for path in source map. -// Returns nil if not found. -// Note: This assumes that the path entries and map keys are lower cased. func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} { + if len(path) == 0 { return source } - next, ok := source[path[0]] - if ok { - // Fast path - if len(path) == 1 { - return next + var ok bool + var next interface{} + for k, v := range source { + if strings.ToLower(k) == strings.ToLower(path[0]) { + ok = true + next = v + break } + } - // Nested case + if ok { switch next.(type) { case map[interface{}]interface{}: return v.searchMap(cast.ToStringMap(next), path[1:]) @@ -435,126 +424,11 @@ func (v *Viper) searchMap(source map[string]interface{}, path []string) interfac // if the type of `next` is the same as the type being asserted return v.searchMap(next.(map[string]interface{}), path[1:]) default: - // got a value but nested key expected, return "nil" for not found - return nil - } - } - return nil -} - -// searchMapWithPathPrefixes recursively searches for a value for path in source map. -// -// While searchMap() considers each path element as a single map key, this -// function searches for, and prioritizes, merged path elements. -// e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar" -// is also defined, this latter value is returned for path ["foo", "bar"]. -// -// This should be useful only at config level (other maps may not contain dots -// in their keys). -// -// Note: This assumes that the path entries and map keys are lower cased. 
-func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path []string) interface{} { - if len(path) == 0 { - return source - } - - // search for path prefixes, starting from the longest one - for i := len(path); i > 0; i-- { - prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim)) - - next, ok := source[prefixKey] - if ok { - // Fast path - if i == len(path) { - return next - } - - // Nested case - var val interface{} - switch next.(type) { - case map[interface{}]interface{}: - val = v.searchMapWithPathPrefixes(cast.ToStringMap(next), path[i:]) - case map[string]interface{}: - // Type assertion is safe here since it is only reached - // if the type of `next` is the same as the type being asserted - val = v.searchMapWithPathPrefixes(next.(map[string]interface{}), path[i:]) - default: - // got a value but nested key expected, do nothing and look for next prefix - } - if val != nil { - return val - } - } - } - - // not found - return nil -} - -// isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere -// on its path in the map. -// e.g., if "foo.bar" has a value in the given map, it “shadows” -// "foo.bar.baz" in a lower-priority map -func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string { - var parentVal interface{} - for i := 1; i < len(path); i++ { - parentVal = v.searchMap(m, path[0:i]) - if parentVal == nil { - // not found, no need to add more path elements - return "" - } - switch parentVal.(type) { - case map[interface{}]interface{}: - continue - case map[string]interface{}: - continue - default: - // parentVal is a regular value which shadows "path" - return strings.Join(path[0:i], v.keyDelim) - } - } - return "" -} - -// isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere -// in a sub-path of the map. -// e.g., if "foo.bar" has a value in the given map, it “shadows” -// "foo.bar.baz" in a lower-priority map -func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string { - // unify input map - var m map[string]interface{} - switch mi.(type) { - case map[string]string, map[string]FlagValue: - m = cast.ToStringMap(mi) - default: - return "" - } - - // scan paths - var parentKey string - for i := 1; i < len(path); i++ { - parentKey = strings.Join(path[0:i], v.keyDelim) - if _, ok := m[parentKey]; ok { - return parentKey - } - } - return "" -} - -// isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere -// in the environment, when automatic env is on. -// e.g., if "foo.bar" has a value in the environment, it “shadows” -// "foo.bar.baz" in a lower-priority map -func (v *Viper) isPathShadowedInAutoEnv(path []string) string { - var parentKey string - var val string - for i := 1; i < len(path); i++ { - parentKey = strings.Join(path[0:i], v.keyDelim) - if val = v.getEnv(v.mergeWithEnvPrefix(parentKey)); val != "" { - return parentKey + return next } + } else { + return nil } - return "" } // SetTypeByDefaultValue enables or disables the inference of a key value's @@ -581,8 +455,8 @@ func GetViper() *Viper { return v } -// Get can retrieve any value given the key to use. -// Get is case-insensitive for a key. +// Viper is essentially repository for configurations +// Get can retrieve any value given the key to use // Get has the behavior of returning the value associated with the first // place from where it is set. 
Viper will check in the following order: // override, flag, env, config file, key/value store, default @@ -590,126 +464,151 @@ func GetViper() *Viper { // Get returns an interface. For a specific value use one of the Get____ methods. func Get(key string) interface{} { return v.Get(key) } func (v *Viper) Get(key string) interface{} { + path := strings.Split(key, v.keyDelim) + lcaseKey := strings.ToLower(key) val := v.find(lcaseKey) + if val == nil { - return nil + source := v.find(strings.ToLower(path[0])) + if source != nil { + if reflect.TypeOf(source).Kind() == reflect.Map { + val = v.searchMap(cast.ToStringMap(source), path[1:]) + } + } } - if v.typeByDefValue { - // TODO(bep) this branch isn't covered by a single test. - valType := val - path := strings.Split(lcaseKey, v.keyDelim) - defVal := v.searchMap(v.defaults, path) - if defVal != nil { - valType = defVal + // if no other value is returned and a flag does exist for the value, + // get the flag's value even if the flag's value has not changed + if val == nil { + if flag, exists := v.pflags[lcaseKey]; exists { + jww.TRACE.Println(key, "get pflag default", val) + switch flag.ValueType() { + case "int", "int8", "int16", "int32", "int64": + val = cast.ToInt(flag.ValueString()) + case "bool": + val = cast.ToBool(flag.ValueString()) + default: + val = flag.ValueString() + } } + } + + if val == nil { + return nil + } - switch valType.(type) { - case bool: - return cast.ToBool(val) - case string: - return cast.ToString(val) - case int64, int32, int16, int8, int: - return cast.ToInt(val) - case float64, float32: - return cast.ToFloat64(val) - case time.Time: - return cast.ToTime(val) - case time.Duration: - return cast.ToDuration(val) - case []string: - return cast.ToStringSlice(val) + var valType interface{} + if !v.typeByDefValue { + valType = val + } else { + defVal, defExists := v.defaults[lcaseKey] + if defExists { + valType = defVal + } else { + valType = val } } + switch valType.(type) { + case bool: + return cast.ToBool(val) + case string: + return cast.ToString(val) + case int64, int32, int16, int8, int: + return cast.ToInt(val) + case float64, float32: + return cast.ToFloat64(val) + case time.Time: + return cast.ToTime(val) + case time.Duration: + return cast.ToDuration(val) + case []string: + return cast.ToStringSlice(val) + } return val } -// Sub returns new Viper instance representing a sub tree of this instance. -// Sub is case-insensitive for a key. +// Returns new Viper instance representing a sub tree of this instance func Sub(key string) *Viper { return v.Sub(key) } func (v *Viper) Sub(key string) *Viper { subv := New() data := v.Get(key) - if data == nil { - return nil - } - if reflect.TypeOf(data).Kind() == reflect.Map { subv.config = cast.ToStringMap(data) return subv + } else { + return nil } - return nil } -// GetString returns the value associated with the key as a string. +// Returns the value associated with the key as a string func GetString(key string) string { return v.GetString(key) } func (v *Viper) GetString(key string) string { return cast.ToString(v.Get(key)) } -// GetBool returns the value associated with the key as a boolean. +// Returns the value associated with the key as a boolean func GetBool(key string) bool { return v.GetBool(key) } func (v *Viper) GetBool(key string) bool { return cast.ToBool(v.Get(key)) } -// GetInt returns the value associated with the key as an integer. 
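Editor's aside, not part of the vendored patch: a small, hypothetical sketch of the precedence order described in the `Get` documentation above (override, flag, env, config file, key/value store, default), using only calls whose behaviour is the same in both vendored versions. The key name and values are illustrative.

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/viper"
)

func main() {
	// Lowest precedence: a default value.
	viper.SetDefault("port", 1313)
	fmt.Println(viper.GetInt("port")) // 1313

	// A bound environment variable outranks the default.
	os.Setenv("PORT", "4000")
	viper.BindEnv("port")
	fmt.Println(viper.GetInt("port")) // 4000

	// Set() goes into the override register, which outranks everything else.
	viper.Set("port", 8080)
	fmt.Println(viper.GetInt("port")) // 8080
}
```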
+// Returns the value associated with the key as an integer func GetInt(key string) int { return v.GetInt(key) } func (v *Viper) GetInt(key string) int { return cast.ToInt(v.Get(key)) } -// GetInt64 returns the value associated with the key as an integer. +// Returns the value associated with the key as an integer func GetInt64(key string) int64 { return v.GetInt64(key) } func (v *Viper) GetInt64(key string) int64 { return cast.ToInt64(v.Get(key)) } -// GetFloat64 returns the value associated with the key as a float64. +// Returns the value associated with the key as a float64 func GetFloat64(key string) float64 { return v.GetFloat64(key) } func (v *Viper) GetFloat64(key string) float64 { return cast.ToFloat64(v.Get(key)) } -// GetTime returns the value associated with the key as time. +// Returns the value associated with the key as time func GetTime(key string) time.Time { return v.GetTime(key) } func (v *Viper) GetTime(key string) time.Time { return cast.ToTime(v.Get(key)) } -// GetDuration returns the value associated with the key as a duration. +// Returns the value associated with the key as a duration func GetDuration(key string) time.Duration { return v.GetDuration(key) } func (v *Viper) GetDuration(key string) time.Duration { return cast.ToDuration(v.Get(key)) } -// GetStringSlice returns the value associated with the key as a slice of strings. +// Returns the value associated with the key as a slice of strings func GetStringSlice(key string) []string { return v.GetStringSlice(key) } func (v *Viper) GetStringSlice(key string) []string { return cast.ToStringSlice(v.Get(key)) } -// GetStringMap returns the value associated with the key as a map of interfaces. +// Returns the value associated with the key as a map of interfaces func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) } func (v *Viper) GetStringMap(key string) map[string]interface{} { return cast.ToStringMap(v.Get(key)) } -// GetStringMapString returns the value associated with the key as a map of strings. +// Returns the value associated with the key as a map of strings func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) } func (v *Viper) GetStringMapString(key string) map[string]string { return cast.ToStringMapString(v.Get(key)) } -// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings. +// Returns the value associated with the key as a map to a slice of strings. func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) } func (v *Viper) GetStringMapStringSlice(key string) map[string][]string { return cast.ToStringMapStringSlice(v.Get(key)) } -// GetSizeInBytes returns the size of the value associated with the given key +// Returns the size of the value associated with the given key // in bytes. func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) } func (v *Viper) GetSizeInBytes(key string) uint { @@ -717,25 +616,17 @@ func (v *Viper) GetSizeInBytes(key string) uint { return parseSizeInBytes(sizeStr) } -// UnmarshalKey takes a single key and unmarshals it into a Struct. 
+// Takes a single key and unmarshals it into a Struct func UnmarshalKey(key string, rawVal interface{}) error { return v.UnmarshalKey(key, rawVal) } func (v *Viper) UnmarshalKey(key string, rawVal interface{}) error { - err := decode(v.Get(key), defaultDecoderConfig(rawVal)) - - if err != nil { - return err - } - - v.insensitiviseMaps() - - return nil + return mapstructure.Decode(v.Get(key), rawVal) } -// Unmarshal unmarshals the config into a Struct. Make sure that the tags +// Unmarshals the config into a Struct. Make sure that the tags // on the fields of the structure are properly set. func Unmarshal(rawVal interface{}) error { return v.Unmarshal(rawVal) } func (v *Viper) Unmarshal(rawVal interface{}) error { - err := decode(v.AllSettings(), defaultDecoderConfig(rawVal)) + err := mapstructure.WeakDecode(v.AllSettings(), rawVal) if err != nil { return err @@ -746,19 +637,16 @@ func (v *Viper) Unmarshal(rawVal interface{}) error { return nil } -// defaultDecoderConfig returns default mapsstructure.DecoderConfig with suppot -// of time.Duration values -func defaultDecoderConfig(output interface{}) *mapstructure.DecoderConfig { - return &mapstructure.DecoderConfig{ +// A wrapper around mapstructure.Decode that mimics the WeakDecode functionality +// while erroring on non existing vals in the destination struct +func weakDecodeExact(input, output interface{}) error { + config := &mapstructure.DecoderConfig{ + ErrorUnused: true, Metadata: nil, Result: output, WeaklyTypedInput: true, - DecodeHook: mapstructure.StringToTimeDurationHookFunc(), } -} -// A wrapper around mapstructure.Decode that mimics the WeakDecode functionality -func decode(input interface{}, config *mapstructure.DecoderConfig) error { decoder, err := mapstructure.NewDecoder(config) if err != nil { return err @@ -766,13 +654,10 @@ func decode(input interface{}, config *mapstructure.DecoderConfig) error { return decoder.Decode(input) } -// UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent -// in the destination struct. +// Unmarshals the config into a Struct, erroring if a field is non-existant +// in the destination struct func (v *Viper) UnmarshalExact(rawVal interface{}) error { - config := defaultDecoderConfig(rawVal) - config.ErrorUnused = true - - err := decode(v.AllSettings(), config) + err := weakDecodeExact(v.AllSettings(), rawVal) if err != nil { return err @@ -783,27 +668,27 @@ func (v *Viper) UnmarshalExact(rawVal interface{}) error { return nil } -// BindPFlags binds a full flag set to the configuration, using each flag's long +// Bind a full flag set to the configuration, using each flag's long // name as the config key. -func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) } -func (v *Viper) BindPFlags(flags *pflag.FlagSet) error { +func BindPFlags(flags *pflag.FlagSet) (err error) { return v.BindPFlags(flags) } +func (v *Viper) BindPFlags(flags *pflag.FlagSet) (err error) { return v.BindFlagValues(pflagValueSet{flags}) } -// BindPFlag binds a specific key to a pflag (as used by cobra). +// Bind a specific key to a pflag (as used by cobra). 
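Editor's aside, not part of the vendored patch: a minimal, self-contained sketch of `Unmarshal` as documented above, decoding the merged settings into a struct. The struct and keys are hypothetical; the weakly-typed decode shown here behaves the same on both sides of this vendoring change.

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

// serverConfig is a hypothetical target struct; mapstructure matches
// field names against the lower-cased configuration keys.
type serverConfig struct {
	Port int
	Name string
}

func main() {
	viper.SetDefault("port", 1313)
	viper.Set("name", "Steve")

	var c serverConfig
	if err := viper.Unmarshal(&c); err != nil {
		panic(fmt.Errorf("unable to decode into struct: %v", err))
	}

	fmt.Printf("%+v\n", c) // {Port:1313 Name:Steve}
}
```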
// Example (where serverCmd is a Cobra instance): // // serverCmd.Flags().Int("port", 1138, "Port to run Application server on") // Viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) // -func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) } -func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error { +func BindPFlag(key string, flag *pflag.Flag) (err error) { return v.BindPFlag(key, flag) } +func (v *Viper) BindPFlag(key string, flag *pflag.Flag) (err error) { return v.BindFlagValue(key, pflagValue{flag}) } -// BindFlagValues binds a full FlagValue set to the configuration, using each flag's long +// Bind a full FlagValue set to the configuration, using each flag's long // name as the config key. -func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) } +func BindFlagValues(flags FlagValueSet) (err error) { return v.BindFlagValues(flags) } func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) { flags.VisitAll(func(flag FlagValue) { if err = v.BindFlagValue(flag.Name(), flag); err != nil { @@ -813,14 +698,14 @@ func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) { return nil } -// BindFlagValue binds a specific key to a FlagValue. -// Example (where serverCmd is a Cobra instance): +// Bind a specific key to a FlagValue. +// Example(where serverCmd is a Cobra instance): // // serverCmd.Flags().Int("port", 1138, "Port to run Application server on") // Viper.BindFlagValue("port", serverCmd.Flags().Lookup("port")) // -func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) } -func (v *Viper) BindFlagValue(key string, flag FlagValue) error { +func BindFlagValue(key string, flag FlagValue) (err error) { return v.BindFlagValue(key, flag) } +func (v *Viper) BindFlagValue(key string, flag FlagValue) (err error) { if flag == nil { return fmt.Errorf("flag for %q is nil", key) } @@ -828,12 +713,12 @@ func (v *Viper) BindFlagValue(key string, flag FlagValue) error { return nil } -// BindEnv binds a Viper key to a ENV variable. -// ENV variables are case sensitive. +// Binds a Viper key to a ENV variable +// ENV variables are case sensitive // If only a key is provided, it will use the env key matching the key, uppercased. // EnvPrefix will be used when set when env name is not provided. -func BindEnv(input ...string) error { return v.BindEnv(input...) } -func (v *Viper) BindEnv(input ...string) error { +func BindEnv(input ...string) (err error) { return v.BindEnv(input...) } +func (v *Viper) BindEnv(input ...string) (err error) { var key, envkey string if len(input) == 0 { return fmt.Errorf("BindEnv missing key to bind to") @@ -852,149 +737,113 @@ func (v *Viper) BindEnv(input ...string) error { return nil } -// Given a key, find the value. +// Given a key, find the value // Viper will check in the following order: -// flag, env, config file, key/value store, default. -// Viper will check to see if an alias exists first. -// Note: this assumes a lower-cased key given. 
-func (v *Viper) find(lcaseKey string) interface{} { - - var ( - val interface{} - exists bool - path = strings.Split(lcaseKey, v.keyDelim) - nested = len(path) > 1 - ) - - // compute the path through the nested maps to the nested value - if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" { - return nil - } +// flag, env, config file, key/value store, default +// Viper will check to see if an alias exists first +func (v *Viper) find(key string) interface{} { + var val interface{} + var exists bool // if the requested key is an alias, then return the proper key - lcaseKey = v.realKey(lcaseKey) - path = strings.Split(lcaseKey, v.keyDelim) - nested = len(path) > 1 - - // Set() override first - val = v.searchMap(v.override, path) - if val != nil { - return val - } - if nested && v.isPathShadowedInDeepMap(path, v.override) != "" { - return nil - } + key = v.realKey(key) - // PFlag override next - flag, exists := v.pflags[lcaseKey] + // PFlag Override first + flag, exists := v.pflags[key] if exists && flag.HasChanged() { + jww.TRACE.Println(key, "found in override (via pflag):", flag.ValueString()) switch flag.ValueType() { case "int", "int8", "int16", "int32", "int64": return cast.ToInt(flag.ValueString()) case "bool": return cast.ToBool(flag.ValueString()) - case "stringSlice": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - res, _ := readAsCSV(s) - return res default: return flag.ValueString() } } - if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" { - return nil + + val, exists = v.override[key] + if exists { + jww.TRACE.Println(key, "found in override:", val) + return val } - // Env override next if v.automaticEnvApplied { // even if it hasn't been registered, if automaticEnv is used, // check any Get request - if val = v.getEnv(v.mergeWithEnvPrefix(lcaseKey)); val != "" { + if val = v.getEnv(v.mergeWithEnvPrefix(key)); val != "" { + jww.TRACE.Println(key, "found in environment with val:", val) return val } - if nested && v.isPathShadowedInAutoEnv(path) != "" { - return nil - } } - envkey, exists := v.env[lcaseKey] + + envkey, exists := v.env[key] if exists { + jww.TRACE.Println(key, "registered as env var", envkey) if val = v.getEnv(envkey); val != "" { + jww.TRACE.Println(envkey, "found in environment with val:", val) return val + } else { + jww.TRACE.Println(envkey, "env value unset:") } } - if nested && v.isPathShadowedInFlatMap(path, v.env) != "" { - return nil - } - // Config file next - val = v.searchMapWithPathPrefixes(v.config, path) - if val != nil { + val, exists = v.config[key] + if exists { + jww.TRACE.Println(key, "found in config:", val) return val } - if nested && v.isPathShadowedInDeepMap(path, v.config) != "" { - return nil - } - // K/V store next - val = v.searchMap(v.kvstore, path) - if val != nil { - return val - } - if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" { - return nil + // Test for nested config parameter + if strings.Contains(key, v.keyDelim) { + path := strings.Split(key, v.keyDelim) + + source := v.find(path[0]) + if source != nil { + if reflect.TypeOf(source).Kind() == reflect.Map { + val := v.searchMap(cast.ToStringMap(source), path[1:]) + jww.TRACE.Println(key, "found in nested config:", val) + return val + } + } } - // Default next - val = v.searchMap(v.defaults, path) - if val != nil { + val, exists = v.kvstore[key] + if exists { + jww.TRACE.Println(key, "found in key/value store:", val) return val } - if nested && v.isPathShadowedInDeepMap(path, 
v.defaults) != "" { - return nil - } - // last chance: if no other value is returned and a flag does exist for the value, - // get the flag's value even if the flag's value has not changed - if flag, exists := v.pflags[lcaseKey]; exists { - switch flag.ValueType() { - case "int", "int8", "int16", "int32", "int64": - return cast.ToInt(flag.ValueString()) - case "bool": - return cast.ToBool(flag.ValueString()) - case "stringSlice": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - res, _ := readAsCSV(s) - return res - default: - return flag.ValueString() - } + val, exists = v.defaults[key] + if exists { + jww.TRACE.Println(key, "found in defaults:", val) + return val } - // last item, no need to check shadowing return nil } -func readAsCSV(val string) ([]string, error) { - if val == "" { - return []string{}, nil - } - stringReader := strings.NewReader(val) - csvReader := csv.NewReader(stringReader) - return csvReader.Read() -} - -// IsSet checks to see if the key has been set in any of the data locations. -// IsSet is case-insensitive for a key. +// Check to see if the key has been set in any of the data locations func IsSet(key string) bool { return v.IsSet(key) } func (v *Viper) IsSet(key string) bool { + path := strings.Split(key, v.keyDelim) + lcaseKey := strings.ToLower(key) val := v.find(lcaseKey) + + if val == nil { + source := v.find(strings.ToLower(path[0])) + if source != nil { + if reflect.TypeOf(source).Kind() == reflect.Map { + val = v.searchMap(cast.ToStringMap(source), path[1:]) + } + } + } + return val != nil } -// AutomaticEnv has Viper check ENV variables for all. +// Have Viper check ENV variables for all // keys set in config, default & flags func AutomaticEnv() { v.AutomaticEnv() } func (v *Viper) AutomaticEnv() { @@ -1053,11 +902,12 @@ func (v *Viper) realKey(key string) string { if exists { jww.DEBUG.Println("Alias", key, "to", newkey) return v.realKey(newkey) + } else { + return key } - return key } -// InConfig checks to see if the given key (or an alias) is in the config file. +// Check to see if the given key (or an alias) is in the config file func InConfig(key string) bool { return v.InConfig(key) } func (v *Viper) InConfig(key string) bool { // if the requested key is an alias, then return the proper key @@ -1067,85 +917,53 @@ func (v *Viper) InConfig(key string) bool { return exists } -// SetDefault sets the default value for this key. -// SetDefault is case-insensitive for a key. +// Set the default value for this key. // Default only used when no value is provided by the user via flag, config or ENV. func SetDefault(key string, value interface{}) { v.SetDefault(key, value) } func (v *Viper) SetDefault(key string, value interface{}) { // If alias passed in, then set the proper default key = v.realKey(strings.ToLower(key)) - value = toCaseInsensitiveValue(value) - - path := strings.Split(key, v.keyDelim) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(v.defaults, path[0:len(path)-1]) - - // set innermost value - deepestMap[lastKey] = value + v.defaults[key] = value } -// Set sets the value for the key in the override regiser. -// Set is case-insensitive for a key. +// Sets the value for the key in the override regiser. // Will be used instead of values obtained via -// flags, config file, ENV, default, or key/value store. 
+// flags, config file, ENV, default, or key/value store func Set(key string, value interface{}) { v.Set(key, value) } func (v *Viper) Set(key string, value interface{}) { // If alias passed in, then set the proper override key = v.realKey(strings.ToLower(key)) - value = toCaseInsensitiveValue(value) - - path := strings.Split(key, v.keyDelim) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(v.override, path[0:len(path)-1]) - - // set innermost value - deepestMap[lastKey] = value + v.override[key] = value } -// ReadInConfig will discover and load the configuration file from disk +// Viper will discover and load the configuration file from disk // and key/value stores, searching in one of the defined paths. func ReadInConfig() error { return v.ReadInConfig() } func (v *Viper) ReadInConfig() error { jww.INFO.Println("Attempting to read in config file") - filename, err := v.getConfigFile() - if err != nil { - return err - } - if !stringInSlice(v.getConfigType(), SupportedExts) { return UnsupportedConfigError(v.getConfigType()) } - file, err := afero.ReadFile(v.fs, filename) + file, err := afero.ReadFile(v.fs, v.getConfigFile()) if err != nil { return err } - config := make(map[string]interface{}) - - err = v.unmarshalReader(bytes.NewReader(file), config) - if err != nil { - return err - } + v.config = make(map[string]interface{}) - v.config = config - return nil + return v.unmarshalReader(bytes.NewReader(file), v.config) } // MergeInConfig merges a new configuration with an existing config. func MergeInConfig() error { return v.MergeInConfig() } func (v *Viper) MergeInConfig() error { jww.INFO.Println("Attempting to merge in config file") - filename, err := v.getConfigFile() - if err != nil { - return err - } - if !stringInSlice(v.getConfigType(), SupportedExts) { return UnsupportedConfigError(v.getConfigType()) } - file, err := afero.ReadFile(v.fs, filename) + file, err := afero.ReadFile(v.fs, v.getConfigFile()) if err != nil { return err } @@ -1153,7 +971,7 @@ func (v *Viper) MergeInConfig() error { return v.MergeConfig(bytes.NewReader(file)) } -// ReadConfig will read a configuration file, setting existing keys to nil if the +// Viper will read a configuration file, setting existing keys to nil if the // key does not exist in the file. func ReadConfig(in io.Reader) error { return v.ReadConfig(in) } func (v *Viper) ReadConfig(in io.Reader) error { @@ -1195,22 +1013,6 @@ func castToMapStringInterface( return tgt } -func castMapStringToMapInterface(src map[string]string) map[string]interface{} { - tgt := map[string]interface{}{} - for k, v := range src { - tgt[k] = v - } - return tgt -} - -func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} { - tgt := map[string]interface{}{} - for k, v := range src { - tgt[k] = v - } - return tgt -} - // mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's // insistence on parsing nested structures as `map[interface{}]interface{}` // instead of using a `string` as the key for nest structures beyond one level @@ -1271,24 +1073,34 @@ func mergeMaps( } } -// ReadRemoteConfig attempts to get configuration from a remote source +// func ReadBufConfig(buf *bytes.Buffer) error { return v.ReadBufConfig(buf) } +// func (v *Viper) ReadBufConfig(buf *bytes.Buffer) error { +// v.config = make(map[string]interface{}) +// return v.unmarshalReader(buf, v.config) +// } + +// Attempts to get configuration from a remote source // and read it in the remote configuration registry. 
func ReadRemoteConfig() error { return v.ReadRemoteConfig() } func (v *Viper) ReadRemoteConfig() error { - return v.getKeyValueConfig() + err := v.getKeyValueConfig() + if err != nil { + return err + } + return nil } func WatchRemoteConfig() error { return v.WatchRemoteConfig() } func (v *Viper) WatchRemoteConfig() error { - return v.watchKeyValueConfig() -} - -func (v *Viper) WatchRemoteConfigOnChannel() error { - return v.watchKeyValueConfigOnChannel() + err := v.watchKeyValueConfig() + if err != nil { + return err + } + return nil } -// Unmarshal a Reader into a map. -// Should probably be an unexported function. +// Unmarshall a Reader into a map +// Should probably be an unexported function func unmarshalReader(in io.Reader, c map[string]interface{}) error { return v.unmarshalReader(in, c) } @@ -1304,7 +1116,7 @@ func (v *Viper) insensitiviseMaps() { insensitiviseMap(v.kvstore) } -// Retrieve the first found remote configuration. +// retrieve the first found remote configuration func (v *Viper) getKeyValueConfig() error { if RemoteConfig == nil { return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'") @@ -1321,7 +1133,8 @@ func (v *Viper) getKeyValueConfig() error { return RemoteConfigError("No Files Found") } -func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) { +func (v *Viper) getRemoteConfig(provider *defaultRemoteProvider) (map[string]interface{}, error) { + reader, err := RemoteConfig.Get(provider) if err != nil { return nil, err @@ -1330,24 +1143,7 @@ func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{} return v.kvstore, err } -// Retrieve the first found remote configuration. -func (v *Viper) watchKeyValueConfigOnChannel() error { - for _, rp := range v.remoteProviders { - respc, _ := RemoteConfig.WatchChannel(rp) - //Todo: Add quit channel - go func(rc <-chan *RemoteResponse) { - for { - b := <-rc - reader := bytes.NewReader(b.Value) - v.unmarshalReader(reader, v.kvstore) - } - }(respc) - return nil - } - return RemoteConfigError("No Files Found") -} - -// Retrieve the first found remote configuration. +// retrieve the first found remote configuration func (v *Viper) watchKeyValueConfig() error { for _, rp := range v.remoteProviders { val, err := v.watchRemoteConfig(rp) @@ -1360,7 +1156,7 @@ func (v *Viper) watchKeyValueConfig() error { return RemoteConfigError("No Files Found") } -func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) { +func (v *Viper) watchRemoteConfig(provider *defaultRemoteProvider) (map[string]interface{}, error) { reader, err := RemoteConfig.Watch(provider) if err != nil { return nil, err @@ -1369,115 +1165,65 @@ func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface return v.kvstore, err } -// AllKeys returns all keys holding a value, regardless of where they are set. 
-// Nested keys are returned with a v.keyDelim (= ".") separator +// Return all keys regardless where they are set func AllKeys() []string { return v.AllKeys() } func (v *Viper) AllKeys() []string { - m := map[string]bool{} - // add all paths, by order of descending priority to ensure correct shadowing - m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "") - m = v.flattenAndMergeMap(m, v.override, "") - m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags)) - m = v.mergeFlatMap(m, castMapStringToMapInterface(v.env)) - m = v.flattenAndMergeMap(m, v.config, "") - m = v.flattenAndMergeMap(m, v.kvstore, "") - m = v.flattenAndMergeMap(m, v.defaults, "") - - // convert set of paths to list - a := []string{} - for x := range m { - a = append(a, x) + m := map[string]struct{}{} + + for key, _ := range v.defaults { + m[strings.ToLower(key)] = struct{}{} } - return a -} -// flattenAndMergeMap recursively flattens the given map into a map[string]bool -// of key paths (used as a set, easier to manipulate than a []string): -// - each path is merged into a single key string, delimited with v.keyDelim (= ".") -// - if a path is shadowed by an earlier value in the initial shadow map, -// it is skipped. -// The resulting set of paths is merged to the given shadow set at the same time. -func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool { - if shadow != nil && prefix != "" && shadow[prefix] { - // prefix is shadowed => nothing more to flatten - return shadow + for key, _ := range v.pflags { + m[strings.ToLower(key)] = struct{}{} } - if shadow == nil { - shadow = make(map[string]bool) + + for key, _ := range v.env { + m[strings.ToLower(key)] = struct{}{} } - var m2 map[string]interface{} - if prefix != "" { - prefix += v.keyDelim + for key, _ := range v.config { + m[strings.ToLower(key)] = struct{}{} } - for k, val := range m { - fullKey := prefix + k - switch val.(type) { - case map[string]interface{}: - m2 = val.(map[string]interface{}) - case map[interface{}]interface{}: - m2 = cast.ToStringMap(val) - default: - // immediate value - shadow[strings.ToLower(fullKey)] = true - continue - } - // recursively merge to shadow map - shadow = v.flattenAndMergeMap(shadow, m2, fullKey) - } - return shadow -} - -// mergeFlatMap merges the given maps, excluding values of the second map -// shadowed by values from the first map. -func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool { - // scan keys -outer: - for k, _ := range m { - path := strings.Split(k, v.keyDelim) - // scan intermediate paths - var parentKey string - for i := 1; i < len(path); i++ { - parentKey = strings.Join(path[0:i], v.keyDelim) - if shadow[parentKey] { - // path is shadowed, continue - continue outer - } - } - // add key - shadow[strings.ToLower(k)] = true + + for key, _ := range v.kvstore { + m[strings.ToLower(key)] = struct{}{} + } + + for key, _ := range v.override { + m[strings.ToLower(key)] = struct{}{} } - return shadow + + for key, _ := range v.aliases { + m[strings.ToLower(key)] = struct{}{} + } + + a := []string{} + for x, _ := range m { + a = append(a, x) + } + + return a } -// AllSettings merges all settings and returns them as a map[string]interface{}. 
+// Return all settings as a map[string]interface{} func AllSettings() map[string]interface{} { return v.AllSettings() } func (v *Viper) AllSettings() map[string]interface{} { m := map[string]interface{}{} - // start from the list of keys, and construct the map one value at a time - for _, k := range v.AllKeys() { - value := v.Get(k) - if value == nil { - // should not happen, since AllKeys() returns only keys holding a value, - // check just in case anything changes - continue - } - path := strings.Split(k, v.keyDelim) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(m, path[0:len(path)-1]) - // set innermost value - deepestMap[lastKey] = value + for _, x := range v.AllKeys() { + m[x] = v.Get(x) } + return m } -// SetFs sets the filesystem to use to read configuration. +// Se the filesystem to use to read configuration. func SetFs(fs afero.Fs) { v.SetFs(fs) } func (v *Viper) SetFs(fs afero.Fs) { v.fs = fs } -// SetConfigName sets name for the config file. +// Name for the config file. // Does not include extension. func SetConfigName(in string) { v.SetConfigName(in) } func (v *Viper) SetConfigName(in string) { @@ -1487,7 +1233,7 @@ func (v *Viper) SetConfigName(in string) { } } -// SetConfigType sets the type of the configuration returned by the +// Sets the type of the configuration returned by the // remote source, e.g. "json". func SetConfigType(in string) { v.SetConfigType(in) } func (v *Viper) SetConfigType(in string) { @@ -1501,29 +1247,25 @@ func (v *Viper) getConfigType() string { return v.configType } - cf, err := v.getConfigFile() - if err != nil { - return "" - } - + cf := v.getConfigFile() ext := filepath.Ext(cf) if len(ext) > 1 { return ext[1:] + } else { + return "" } - - return "" } -func (v *Viper) getConfigFile() (string, error) { +func (v *Viper) getConfigFile() string { // if explicitly set, then use it if v.configFile != "" { - return v.configFile, nil + return v.configFile } cf, err := v.findConfigFile() if err != nil { - return "", err + return "" } v.configFile = cf @@ -1543,9 +1285,10 @@ func (v *Viper) searchInPath(in string) (filename string) { return "" } -// Search all configPaths for any config file. -// Returns the first path that exists (and is a config file). +// search all configPaths for any config file. +// Returns the first path that exists (and is a config file) func (v *Viper) findConfigFile() (string, error) { + jww.INFO.Println("Searching for config in ", v.configPaths) for _, cp := range v.configPaths { @@ -1557,10 +1300,11 @@ func (v *Viper) findConfigFile() (string, error) { return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} } -// Debug prints all configuration registries for debugging +// Prints all configuration registries for debugging // purposes. 
func Debug() { v.Debug() } func (v *Viper) Debug() { + fmt.Println("Aliases:") fmt.Printf("Aliases:\n%#v\n", v.aliases) fmt.Printf("Override:\n%#v\n", v.override) fmt.Printf("PFlags:\n%#v\n", v.pflags) diff --git a/vendor/github.com/spf13/viper/viper_test.go b/vendor/github.com/spf13/viper/viper_test.go index 774ca1168ce..0c0c7e59bd4 100644 --- a/vendor/github.com/spf13/viper/viper_test.go +++ b/vendor/github.com/spf13/viper/viper_test.go @@ -8,7 +8,6 @@ package viper import ( "bytes" "fmt" - "io" "io/ioutil" "os" "path" @@ -18,8 +17,6 @@ import ( "testing" "time" - "github.com/spf13/cast" - "github.com/spf13/pflag" "github.com/stretchr/testify/assert" ) @@ -107,9 +104,8 @@ var remoteExample = []byte(`{ func initConfigs() { Reset() - var r io.Reader SetConfigType("yaml") - r = bytes.NewReader(yamlExample) + r := bytes.NewReader(yamlExample) unmarshalReader(r, v.config) SetConfigType("json") @@ -133,18 +129,12 @@ func initConfigs() { unmarshalReader(remote, v.kvstore) } -func initConfig(typ, config string) { +func initYAML() { Reset() - SetConfigType(typ) - r := strings.NewReader(config) - - if err := unmarshalReader(r, v.config); err != nil { - panic(err) - } -} + SetConfigType("yaml") + r := bytes.NewReader(yamlExample) -func initYAML() { - initConfig("yaml", string(yamlExample)) + unmarshalReader(r, v.config) } func initJSON() { @@ -243,9 +233,7 @@ func (s *stringValue) String() string { func TestBasics(t *testing.T) { SetConfigFile("/tmp/config.yaml") - filename, err := v.getConfigFile() - assert.Equal(t, "/tmp/config.yaml", filename) - assert.NoError(t, err) + assert.Equal(t, "/tmp/config.yaml", v.getConfigFile()) } func TestDefault(t *testing.T) { @@ -271,7 +259,7 @@ func TestUnmarshalling(t *testing.T) { assert.False(t, InConfig("state")) assert.Equal(t, "steve", Get("name")) assert.Equal(t, []interface{}{"skateboarding", "snowboarding", "go"}, Get("hobbies")) - assert.Equal(t, map[string]interface{}{"jacket": "leather", "trousers": "denim", "pants": map[string]interface{}{"size": "large"}}, Get("clothing")) + assert.Equal(t, map[interface{}]interface{}{"jacket": "leather", "trousers": "denim", "pants": map[interface{}]interface{}{"size": "large"}}, Get("clothing")) assert.Equal(t, 35, Get("age")) } @@ -432,9 +420,9 @@ func TestSetEnvReplacer(t *testing.T) { func TestAllKeys(t *testing.T) { initConfigs() - ks := sort.StringSlice{"title", "newkey", "owner.organization", "owner.dob", "owner.bio", "name", "beard", "ppu", "batters.batter", "hobbies", "clothing.jacket", "clothing.trousers", "clothing.pants.size", "age", "hacker", "id", "type", "eyes", "p_id", "p_ppu", "p_batters.batter.type", "p_type", "p_name", "foos"} + ks := sort.StringSlice{"title", "newkey", "owner", "name", "beard", "ppu", "batters", "hobbies", "clothing", "age", "hacker", "id", "type", "eyes", "p_id", "p_ppu", "p_batters.batter.type", "p_type", "p_name", "foos"} dob, _ := time.Parse(time.RFC3339, "1979-05-27T07:32:00Z") - all := map[string]interface{}{"owner": map[string]interface{}{"organization": "MongoDB", "bio": "MongoDB Chief Developer Advocate & Hacker at Large", "dob": dob}, "title": "TOML Example", "ppu": 0.55, "eyes": "brown", "clothing": map[string]interface{}{"trousers": "denim", "jacket": "leather", "pants": map[string]interface{}{"size": "large"}}, "id": "0001", "batters": map[string]interface{}{"batter": []interface{}{map[string]interface{}{"type": "Regular"}, map[string]interface{}{"type": "Chocolate"}, map[string]interface{}{"type": "Blueberry"}, map[string]interface{}{"type": "Devil's Food"}}}, 
"hacker": true, "beard": true, "hobbies": []interface{}{"skateboarding", "snowboarding", "go"}, "age": 35, "type": "donut", "newkey": "remote", "name": "Cake", "p_id": "0001", "p_ppu": "0.55", "p_name": "Cake", "p_batters": map[string]interface{}{"batter": map[string]interface{}{"type": "Regular"}}, "p_type": "donut", "foos": []map[string]interface{}{map[string]interface{}{"foo": []map[string]interface{}{map[string]interface{}{"key": 1}, map[string]interface{}{"key": 2}, map[string]interface{}{"key": 3}, map[string]interface{}{"key": 4}}}}} + all := map[string]interface{}{"owner": map[string]interface{}{"organization": "MongoDB", "Bio": "MongoDB Chief Developer Advocate & Hacker at Large", "dob": dob}, "title": "TOML Example", "ppu": 0.55, "eyes": "brown", "clothing": map[interface{}]interface{}{"trousers": "denim", "jacket": "leather", "pants": map[interface{}]interface{}{"size": "large"}}, "id": "0001", "batters": map[string]interface{}{"batter": []interface{}{map[string]interface{}{"type": "Regular"}, map[string]interface{}{"type": "Chocolate"}, map[string]interface{}{"type": "Blueberry"}, map[string]interface{}{"type": "Devil's Food"}}}, "hacker": true, "beard": true, "hobbies": []interface{}{"skateboarding", "snowboarding", "go"}, "age": 35, "type": "donut", "newkey": "remote", "name": "Cake", "p_id": "0001", "p_ppu": "0.55", "p_name": "Cake", "p_batters.batter.type": "Regular", "p_type": "donut", "foos": []map[string]interface{}{map[string]interface{}{"foo": []map[string]interface{}{map[string]interface{}{"key": 1}, map[string]interface{}{"key": 2}, map[string]interface{}{"key": 3}, map[string]interface{}{"key": 4}}}}} var allkeys sort.StringSlice allkeys = AllKeys() @@ -445,25 +433,13 @@ func TestAllKeys(t *testing.T) { assert.Equal(t, all, AllSettings()) } -func TestAllKeysWithEnv(t *testing.T) { - v := New() - - // bind and define environment variables (including a nested one) - v.BindEnv("id") - v.BindEnv("foo.bar") - v.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) - os.Setenv("ID", "13") - os.Setenv("FOO_BAR", "baz") - - expectedKeys := sort.StringSlice{"id", "foo.bar"} - expectedKeys.Sort() - keys := sort.StringSlice(v.AllKeys()) - keys.Sort() - assert.Equal(t, expectedKeys, keys) +func TestCaseInSensitive(t *testing.T) { + assert.Equal(t, true, Get("hacker")) + Set("Title", "Checking Case") + assert.Equal(t, "Checking Case", Get("tItle")) } func TestAliasesOfAliases(t *testing.T) { - Set("Title", "Checking Case") RegisterAlias("Foo", "Bar") RegisterAlias("Bar", "Title") assert.Equal(t, "Checking Case", Get("FOO")) @@ -477,12 +453,10 @@ func TestRecursiveAliases(t *testing.T) { func TestUnmarshal(t *testing.T) { SetDefault("port", 1313) Set("name", "Steve") - Set("duration", "1s1ms") type config struct { - Port int - Name string - Duration time.Duration + Port int + Name string } var C config @@ -492,18 +466,17 @@ func TestUnmarshal(t *testing.T) { t.Fatalf("unable to decode into struct, %v", err) } - assert.Equal(t, &config{Name: "Steve", Port: 1313, Duration: time.Second + time.Millisecond}, &C) + assert.Equal(t, &C, &config{Name: "Steve", Port: 1313}) Set("port", 1234) err = Unmarshal(&C) if err != nil { t.Fatalf("unable to decode into struct, %v", err) } - assert.Equal(t, &config{Name: "Steve", Port: 1234, Duration: time.Second + time.Millisecond}, &C) + assert.Equal(t, &C, &config{Name: "Steve", Port: 1234}) } func TestBindPFlags(t *testing.T) { - v := New() // create independent Viper object flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError) var testValues = 
map[string]*string{ @@ -518,11 +491,11 @@ func TestBindPFlags(t *testing.T) { "endpoint": "/public", } - for name := range testValues { + for name, _ := range testValues { testValues[name] = flagSet.String(name, "", "test") } - err := v.BindPFlags(flagSet) + err := BindPFlags(flagSet) if err != nil { t.Fatalf("error binding flag set, %v", err) } @@ -533,49 +506,11 @@ func TestBindPFlags(t *testing.T) { }) for name, expected := range mutatedTestValues { - assert.Equal(t, expected, v.Get(name)) + assert.Equal(t, Get(name), expected) } } -func TestBindPFlagsStringSlice(t *testing.T) { - for _, testValue := range []struct { - Expected []string - Value string - }{ - {[]string{}, ""}, - {[]string{"jeden"}, "jeden"}, - {[]string{"dwa", "trzy"}, "dwa,trzy"}, - {[]string{"cztery", "piec , szesc"}, "cztery,\"piec , szesc\""}} { - - for _, changed := range []bool{true, false} { - v := New() // create independent Viper object - flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError) - flagSet.StringSlice("stringslice", testValue.Expected, "test") - flagSet.Visit(func(f *pflag.Flag) { - if len(testValue.Value) > 0 { - f.Value.Set(testValue.Value) - f.Changed = changed - } - }) - - err := v.BindPFlags(flagSet) - if err != nil { - t.Fatalf("error binding flag set, %v", err) - } - - type TestStr struct { - StringSlice []string - } - val := &TestStr{} - if err := v.Unmarshal(val); err != nil { - t.Fatalf("%+#v cannot unmarshal: %s", testValue.Value, err) - } - assert.Equal(t, testValue.Expected, val.StringSlice) - } - } -} - func TestBindPFlag(t *testing.T) { var testString = "testing" var testValue = newStringValue(testString, &testString) @@ -598,6 +533,7 @@ func TestBindPFlag(t *testing.T) { } func TestBoundCaseSensitivity(t *testing.T) { + assert.Equal(t, "brown", Get("eyes")) BindEnv("eYEs", "TURTLE_EYES") @@ -694,19 +630,19 @@ func TestFindsNestedKeys(t *testing.T) { "age": 35, "owner": map[string]interface{}{ "organization": "MongoDB", - "bio": "MongoDB Chief Developer Advocate & Hacker at Large", + "Bio": "MongoDB Chief Developer Advocate & Hacker at Large", "dob": dob, }, - "owner.bio": "MongoDB Chief Developer Advocate & Hacker at Large", + "owner.Bio": "MongoDB Chief Developer Advocate & Hacker at Large", "type": "donut", "id": "0001", "name": "Cake", "hacker": true, "ppu": 0.55, - "clothing": map[string]interface{}{ + "clothing": map[interface{}]interface{}{ "jacket": "leather", "trousers": "denim", - "pants": map[string]interface{}{ + "pants": map[interface{}]interface{}{ "size": "large", }, }, @@ -752,7 +688,7 @@ func TestReadBufConfig(t *testing.T) { assert.False(t, v.InConfig("state")) assert.Equal(t, "steve", v.Get("name")) assert.Equal(t, []interface{}{"skateboarding", "snowboarding", "go"}, v.Get("hobbies")) - assert.Equal(t, map[string]interface{}{"jacket": "leather", "trousers": "denim", "pants": map[string]interface{}{"size": "large"}}, v.Get("clothing")) + assert.Equal(t, map[interface{}]interface{}{"jacket": "leather", "trousers": "denim", "pants": map[interface{}]interface{}{"size": "large"}}, v.Get("clothing")) assert.Equal(t, 35, v.Get("age")) } @@ -802,27 +738,7 @@ func TestWrongDirsSearchNotFound(t *testing.T) { v.AddConfigPath(`thispathaintthere`) err := v.ReadInConfig() - assert.Equal(t, reflect.TypeOf(ConfigFileNotFoundError{"", ""}), reflect.TypeOf(err)) - - // Even though config did not load and the error might have - // been ignored by the client, the default still loads - assert.Equal(t, `default`, v.GetString(`key`)) -} - -func TestWrongDirsSearchNotFoundForMerge(t 
*testing.T) { - - _, config, cleanup := initDirs(t) - defer cleanup() - - v := New() - v.SetConfigName(config) - v.SetDefault(`key`, `default`) - - v.AddConfigPath(`whattayoutalkingbout`) - v.AddConfigPath(`thispathaintthere`) - - err := v.MergeInConfig() - assert.Equal(t, reflect.TypeOf(ConfigFileNotFoundError{"", ""}), reflect.TypeOf(err)) + assert.Equal(t, reflect.TypeOf(UnsupportedConfigError("")), reflect.TypeOf(err)) // Even though config did not load and the error might have // been ignored by the client, the default still loads @@ -841,10 +757,7 @@ func TestSub(t *testing.T) { assert.Equal(t, v.Get("clothing.pants.size"), subv.Get("size")) subv = v.Sub("clothing.pants.size") - assert.Equal(t, (*Viper)(nil), subv) - - subv = v.Sub("missing.key") - assert.Equal(t, (*Viper)(nil), subv) + assert.Equal(t, subv, (*Viper)(nil)) } var yamlMergeExampleTgt = []byte(` @@ -965,257 +878,32 @@ func TestMergeConfigNoMerge(t *testing.T) { } func TestUnmarshalingWithAliases(t *testing.T) { - v := New() - v.SetDefault("ID", 1) - v.Set("name", "Steve") - v.Set("lastname", "Owen") + SetDefault("Id", 1) + Set("name", "Steve") + Set("lastname", "Owen") - v.RegisterAlias("UserID", "ID") - v.RegisterAlias("Firstname", "name") - v.RegisterAlias("Surname", "lastname") + RegisterAlias("UserID", "Id") + RegisterAlias("Firstname", "name") + RegisterAlias("Surname", "lastname") type config struct { - ID int + Id int FirstName string Surname string } var C config - err := v.Unmarshal(&C) + + err := Unmarshal(&C) if err != nil { t.Fatalf("unable to decode into struct, %v", err) } - assert.Equal(t, &config{ID: 1, FirstName: "Steve", Surname: "Owen"}, &C) + assert.Equal(t, &C, &config{Id: 1, FirstName: "Steve", Surname: "Owen"}) } func TestSetConfigNameClearsFileCache(t *testing.T) { SetConfigFile("/tmp/config.yaml") SetConfigName("default") - f, err := v.getConfigFile() - if err == nil { - t.Fatalf("config file cache should have been cleared") - } - assert.Empty(t, f) -} - -func TestShadowedNestedValue(t *testing.T) { - - config := `name: steve -clothing: - jacket: leather - trousers: denim - pants: - size: large -` - initConfig("yaml", config) - - assert.Equal(t, "steve", GetString("name")) - - polyester := "polyester" - SetDefault("clothing.shirt", polyester) - SetDefault("clothing.jacket.price", 100) - - assert.Equal(t, "leather", GetString("clothing.jacket")) - assert.Nil(t, Get("clothing.jacket.price")) - assert.Equal(t, polyester, GetString("clothing.shirt")) - - clothingSettings := AllSettings()["clothing"].(map[string]interface{}) - assert.Equal(t, "leather", clothingSettings["jacket"]) - assert.Equal(t, polyester, clothingSettings["shirt"]) -} - -func TestDotParameter(t *testing.T) { - initJSON() - // shoud take precedence over batters defined in jsonExample - r := bytes.NewReader([]byte(`{ "batters.batter": [ { "type": "Small" } ] }`)) - unmarshalReader(r, v.config) - - actual := Get("batters.batter") - expected := []interface{}{map[string]interface{}{"type": "Small"}} - assert.Equal(t, expected, actual) -} - -func TestCaseInsensitive(t *testing.T) { - for _, config := range []struct { - typ string - content string - }{ - {"yaml", ` -aBcD: 1 -eF: - gH: 2 - iJk: 3 - Lm: - nO: 4 - P: - Q: 5 - R: 6 -`}, - {"json", `{ - "aBcD": 1, - "eF": { - "iJk": 3, - "Lm": { - "P": { - "Q": 5, - "R": 6 - }, - "nO": 4 - }, - "gH": 2 - } -}`}, - {"toml", `aBcD = 1 -[eF] -gH = 2 -iJk = 3 -[eF.Lm] -nO = 4 -[eF.Lm.P] -Q = 5 -R = 6 -`}, - } { - doTestCaseInsensitive(t, config.typ, config.content) - } -} - -func 
TestCaseInsensitiveSet(t *testing.T) { - Reset() - m1 := map[string]interface{}{ - "Foo": 32, - "Bar": map[interface{}]interface { - }{ - "ABc": "A", - "cDE": "B"}, - } - - m2 := map[string]interface{}{ - "Foo": 52, - "Bar": map[interface{}]interface { - }{ - "bCd": "A", - "eFG": "B"}, - } - - Set("Given1", m1) - Set("Number1", 42) - - SetDefault("Given2", m2) - SetDefault("Number2", 52) - - // Verify SetDefault - if v := Get("number2"); v != 52 { - t.Fatalf("Expected 52 got %q", v) - } - - if v := Get("given2.foo"); v != 52 { - t.Fatalf("Expected 52 got %q", v) - } - - if v := Get("given2.bar.bcd"); v != "A" { - t.Fatalf("Expected A got %q", v) - } - - if _, ok := m2["Foo"]; !ok { - t.Fatal("Input map changed") - } - - // Verify Set - if v := Get("number1"); v != 42 { - t.Fatalf("Expected 42 got %q", v) - } - - if v := Get("given1.foo"); v != 32 { - t.Fatalf("Expected 32 got %q", v) - } - - if v := Get("given1.bar.abc"); v != "A" { - t.Fatalf("Expected A got %q", v) - } - - if _, ok := m1["Foo"]; !ok { - t.Fatal("Input map changed") - } -} - -func TestParseNested(t *testing.T) { - type duration struct { - Delay time.Duration - } - - type item struct { - Name string - Delay time.Duration - Nested duration - } - - config := `[[parent]] - delay="100ms" - [parent.nested] - delay="200ms" -` - initConfig("toml", config) - - var items []item - err := v.UnmarshalKey("parent", &items) - if err != nil { - t.Fatalf("unable to decode into struct, %v", err) - } - - assert.Equal(t, 1, len(items)) - assert.Equal(t, 100*time.Millisecond, items[0].Delay) - assert.Equal(t, 200*time.Millisecond, items[0].Nested.Delay) -} - -func doTestCaseInsensitive(t *testing.T, typ, config string) { - initConfig(typ, config) - Set("RfD", true) - assert.Equal(t, true, Get("rfd")) - assert.Equal(t, true, Get("rFD")) - assert.Equal(t, 1, cast.ToInt(Get("abcd"))) - assert.Equal(t, 1, cast.ToInt(Get("Abcd"))) - assert.Equal(t, 2, cast.ToInt(Get("ef.gh"))) - assert.Equal(t, 3, cast.ToInt(Get("ef.ijk"))) - assert.Equal(t, 4, cast.ToInt(Get("ef.lm.no"))) - assert.Equal(t, 5, cast.ToInt(Get("ef.lm.p.q"))) - -} - -func BenchmarkGetBool(b *testing.B) { - key := "BenchmarkGetBool" - v = New() - v.Set(key, true) - - for i := 0; i < b.N; i++ { - if !v.GetBool(key) { - b.Fatal("GetBool returned false") - } - } -} - -func BenchmarkGet(b *testing.B) { - key := "BenchmarkGet" - v = New() - v.Set(key, true) - - for i := 0; i < b.N; i++ { - if !v.Get(key).(bool) { - b.Fatal("Get returned false") - } - } -} - -// This is the "perfect result" for the above. -func BenchmarkGetBoolFromMap(b *testing.B) { - m := make(map[string]bool) - key := "BenchmarkGetBool" - m[key] = true - - for i := 0; i < b.N; i++ { - if !m[key] { - b.Fatal("Map value was false") - } - } + assert.Empty(t, v.getConfigFile()) } diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s deleted file mode 100644 index 469bfa10039..00000000000 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !gccgo - -#include "textflag.h" - -// -// System call support for ARM, OpenBSD -// - -// Just jump to package syscall's implementation for all these functions. -// The runtime may know about them. 
- -TEXT ·Syscall(SB),NOSPLIT,$0-28 - B syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-40 - B syscall·Syscall6(SB) - -TEXT ·Syscall9(SB),NOSPLIT,$0-52 - B syscall·Syscall9(SB) - -TEXT ·RawSyscall(SB),NOSPLIT,$0-28 - B syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 - B syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go deleted file mode 100644 index 83b6bceab43..00000000000 --- a/vendor/golang.org/x/sys/unix/cap_freebsd.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd - -package unix - -import ( - errorspkg "errors" - "fmt" -) - -// Go implementation of C mostly found in /usr/src/sys/kern/subr_capability.c - -const ( - // This is the version of CapRights this package understands. See C implementation for parallels. - capRightsGoVersion = CAP_RIGHTS_VERSION_00 - capArSizeMin = CAP_RIGHTS_VERSION_00 + 2 - capArSizeMax = capRightsGoVersion + 2 -) - -var ( - bit2idx = []int{ - -1, 0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, - 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - } -) - -func capidxbit(right uint64) int { - return int((right >> 57) & 0x1f) -} - -func rightToIndex(right uint64) (int, error) { - idx := capidxbit(right) - if idx < 0 || idx >= len(bit2idx) { - return -2, fmt.Errorf("index for right 0x%x out of range", right) - } - return bit2idx[idx], nil -} - -func caprver(right uint64) int { - return int(right >> 62) -} - -func capver(rights *CapRights) int { - return caprver(rights.Rights[0]) -} - -func caparsize(rights *CapRights) int { - return capver(rights) + 2 -} - -// CapRightsSet sets the permissions in setrights in rights. -func CapRightsSet(rights *CapRights, setrights []uint64) error { - // This is essentially a copy of cap_rights_vset() - if capver(rights) != CAP_RIGHTS_VERSION_00 { - return fmt.Errorf("bad rights version %d", capver(rights)) - } - - n := caparsize(rights) - if n < capArSizeMin || n > capArSizeMax { - return errorspkg.New("bad rights size") - } - - for _, right := range setrights { - if caprver(right) != CAP_RIGHTS_VERSION_00 { - return errorspkg.New("bad right version") - } - i, err := rightToIndex(right) - if err != nil { - return err - } - if i >= n { - return errorspkg.New("index overflow") - } - if capidxbit(rights.Rights[i]) != capidxbit(right) { - return errorspkg.New("index mismatch") - } - rights.Rights[i] |= right - if capidxbit(rights.Rights[i]) != capidxbit(right) { - return errorspkg.New("index mismatch (after assign)") - } - } - - return nil -} - -// CapRightsClear clears the permissions in clearrights from rights. 
-func CapRightsClear(rights *CapRights, clearrights []uint64) error { - // This is essentially a copy of cap_rights_vclear() - if capver(rights) != CAP_RIGHTS_VERSION_00 { - return fmt.Errorf("bad rights version %d", capver(rights)) - } - - n := caparsize(rights) - if n < capArSizeMin || n > capArSizeMax { - return errorspkg.New("bad rights size") - } - - for _, right := range clearrights { - if caprver(right) != CAP_RIGHTS_VERSION_00 { - return errorspkg.New("bad right version") - } - i, err := rightToIndex(right) - if err != nil { - return err - } - if i >= n { - return errorspkg.New("index overflow") - } - if capidxbit(rights.Rights[i]) != capidxbit(right) { - return errorspkg.New("index mismatch") - } - rights.Rights[i] &= ^(right & 0x01FFFFFFFFFFFFFF) - if capidxbit(rights.Rights[i]) != capidxbit(right) { - return errorspkg.New("index mismatch (after assign)") - } - } - - return nil -} - -// CapRightsIsSet checks whether all the permissions in setrights are present in rights. -func CapRightsIsSet(rights *CapRights, setrights []uint64) (bool, error) { - // This is essentially a copy of cap_rights_is_vset() - if capver(rights) != CAP_RIGHTS_VERSION_00 { - return false, fmt.Errorf("bad rights version %d", capver(rights)) - } - - n := caparsize(rights) - if n < capArSizeMin || n > capArSizeMax { - return false, errorspkg.New("bad rights size") - } - - for _, right := range setrights { - if caprver(right) != CAP_RIGHTS_VERSION_00 { - return false, errorspkg.New("bad right version") - } - i, err := rightToIndex(right) - if err != nil { - return false, err - } - if i >= n { - return false, errorspkg.New("index overflow") - } - if capidxbit(rights.Rights[i]) != capidxbit(right) { - return false, errorspkg.New("index mismatch") - } - if (rights.Rights[i] & right) != right { - return false, nil - } - } - - return true, nil -} - -func capright(idx uint64, bit uint64) uint64 { - return ((1 << (57 + idx)) | bit) -} - -// CapRightsInit returns a pointer to an initialised CapRights structure filled with rights. -// See man cap_rights_init(3) and rights(4). -func CapRightsInit(rights []uint64) (*CapRights, error) { - var r CapRights - r.Rights[0] = (capRightsGoVersion << 62) | capright(0, 0) - r.Rights[1] = capright(1, 0) - - err := CapRightsSet(&r, rights) - if err != nil { - return nil, err - } - return &r, nil -} - -// CapRightsLimit reduces the operations permitted on fd to at most those contained in rights. -// The capability rights on fd can never be increased by CapRightsLimit. -// See man cap_rights_limit(2) and rights(4). -func CapRightsLimit(fd uintptr, rights *CapRights) error { - return capRightsLimit(int(fd), rights) -} - -// CapRightsGet returns a CapRights structure containing the operations permitted on fd. -// See man cap_rights_get(3) and rights(4). -func CapRightsGet(fd uintptr) (*CapRights, error) { - r, err := CapRightsInit(nil) - if err != nil { - return nil, err - } - err = capRightsGet(capRightsGoVersion, int(fd), r) - if err != nil { - return nil, err - } - return r, nil -} diff --git a/vendor/golang.org/x/sys/unix/creds_test.go b/vendor/golang.org/x/sys/unix/creds_test.go index 7ae3305320a..eaae7c367fa 100644 --- a/vendor/golang.org/x/sys/unix/creds_test.go +++ b/vendor/golang.org/x/sys/unix/creds_test.go @@ -21,116 +21,101 @@ import ( // sockets. The SO_PASSCRED socket option is enabled on the sending // socket for this to work. 
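The hunk that follows rewrites TestSCMCredentials from a table-driven form (covering SOCK_STREAM and SOCK_DGRAM) back to a single SOCK_STREAM case. The flow under test is: enable SO_PASSCRED on the receiving descriptor, encode a unix.Ucred as ancillary data with unix.UnixCredentials, send it, then recover it with ParseSocketControlMessage and ParseUnixCredentials. The test drives this through net.FileConn and (*net.UnixConn).WriteMsgUnix/ReadMsgUnix; the sketch below instead uses the raw unix.Sendmsg and unix.Recvmsg wrappers to keep it short, and it is a Linux-only illustration (SO_PASSCRED and Ucred are Linux-specific). The single data byte and the out-of-band buffer size are arbitrary choices, not taken from the test.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// A connected pair of local datagram sockets; fds[0] receives, fds[1] sends.
	fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_DGRAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fds[0])
	defer unix.Close(fds[1])

	// The receiver must opt in to credential passing.
	if err := unix.SetsockoptInt(fds[0], unix.SOL_SOCKET, unix.SO_PASSCRED, 1); err != nil {
		panic(err)
	}

	// Encode our own credentials as a control message and send them with one data byte.
	ucred := unix.Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
	oob := unix.UnixCredentials(&ucred)
	if err := unix.Sendmsg(fds[1], []byte("x"), oob, nil, 0); err != nil {
		panic(err)
	}

	// Receive the message and decode the ancillary data on the other end.
	buf := make([]byte, 1)
	oob2 := make([]byte, 128)
	_, oobn, _, _, err := unix.Recvmsg(fds[0], buf, oob2, 0)
	if err != nil {
		panic(err)
	}
	scms, err := unix.ParseSocketControlMessage(oob2[:oobn])
	if err != nil {
		panic(err)
	}
	got, err := unix.ParseUnixCredentials(&scms[0])
	if err != nil {
		panic(err)
	}
	fmt.Printf("peer credentials: %+v\n", *got)
}

In the table-driven variant removed below, WriteMsgUnix with no payload delivers a dummy byte on SOCK_STREAM (n = 1) but nothing on SOCK_DGRAM (n = 0), which is exactly what its dataLen field captured.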
func TestSCMCredentials(t *testing.T) { - socketTypeTests := []struct { - socketType int - dataLen int - }{ - { - unix.SOCK_STREAM, - 1, - }, { - unix.SOCK_DGRAM, - 0, - }, + fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM, 0) + if err != nil { + t.Fatalf("Socketpair: %v", err) } + defer unix.Close(fds[0]) + defer unix.Close(fds[1]) - for _, tt := range socketTypeTests { - fds, err := unix.Socketpair(unix.AF_LOCAL, tt.socketType, 0) - if err != nil { - t.Fatalf("Socketpair: %v", err) - } - defer unix.Close(fds[0]) - defer unix.Close(fds[1]) - - err = unix.SetsockoptInt(fds[0], unix.SOL_SOCKET, unix.SO_PASSCRED, 1) - if err != nil { - t.Fatalf("SetsockoptInt: %v", err) - } - - srvFile := os.NewFile(uintptr(fds[0]), "server") - defer srvFile.Close() - srv, err := net.FileConn(srvFile) - if err != nil { - t.Errorf("FileConn: %v", err) - return - } - defer srv.Close() + err = unix.SetsockoptInt(fds[0], unix.SOL_SOCKET, unix.SO_PASSCRED, 1) + if err != nil { + t.Fatalf("SetsockoptInt: %v", err) + } - cliFile := os.NewFile(uintptr(fds[1]), "client") - defer cliFile.Close() - cli, err := net.FileConn(cliFile) - if err != nil { - t.Errorf("FileConn: %v", err) - return - } - defer cli.Close() + srvFile := os.NewFile(uintptr(fds[0]), "server") + defer srvFile.Close() + srv, err := net.FileConn(srvFile) + if err != nil { + t.Errorf("FileConn: %v", err) + return + } + defer srv.Close() - var ucred unix.Ucred - if os.Getuid() != 0 { - ucred.Pid = int32(os.Getpid()) - ucred.Uid = 0 - ucred.Gid = 0 - oob := unix.UnixCredentials(&ucred) - _, _, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil) - if op, ok := err.(*net.OpError); ok { - err = op.Err - } - if sys, ok := err.(*os.SyscallError); ok { - err = sys.Err - } - if err != syscall.EPERM { - t.Fatalf("WriteMsgUnix failed with %v, want EPERM", err) - } - } + cliFile := os.NewFile(uintptr(fds[1]), "client") + defer cliFile.Close() + cli, err := net.FileConn(cliFile) + if err != nil { + t.Errorf("FileConn: %v", err) + return + } + defer cli.Close() + var ucred unix.Ucred + if os.Getuid() != 0 { ucred.Pid = int32(os.Getpid()) - ucred.Uid = uint32(os.Getuid()) - ucred.Gid = uint32(os.Getgid()) + ucred.Uid = 0 + ucred.Gid = 0 oob := unix.UnixCredentials(&ucred) - - // On SOCK_STREAM, this is internally going to send a dummy byte - n, oobn, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil) - if err != nil { - t.Fatalf("WriteMsgUnix: %v", err) + _, _, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil) + if op, ok := err.(*net.OpError); ok { + err = op.Err } - if n != 0 { - t.Fatalf("WriteMsgUnix n = %d, want 0", n) + if sys, ok := err.(*os.SyscallError); ok { + err = sys.Err } - if oobn != len(oob) { - t.Fatalf("WriteMsgUnix oobn = %d, want %d", oobn, len(oob)) + if err != syscall.EPERM { + t.Fatalf("WriteMsgUnix failed with %v, want EPERM", err) } + } - oob2 := make([]byte, 10*len(oob)) - n, oobn2, flags, _, err := srv.(*net.UnixConn).ReadMsgUnix(nil, oob2) - if err != nil { - t.Fatalf("ReadMsgUnix: %v", err) - } - if flags != 0 { - t.Fatalf("ReadMsgUnix flags = 0x%x, want 0", flags) - } - if n != tt.dataLen { - t.Fatalf("ReadMsgUnix n = %d, want %d", n, tt.dataLen) - } - if oobn2 != oobn { - // without SO_PASSCRED set on the socket, ReadMsgUnix will - // return zero oob bytes - t.Fatalf("ReadMsgUnix oobn = %d, want %d", oobn2, oobn) - } - oob2 = oob2[:oobn2] - if !bytes.Equal(oob, oob2) { - t.Fatal("ReadMsgUnix oob bytes don't match") - } + ucred.Pid = int32(os.Getpid()) + ucred.Uid = uint32(os.Getuid()) + ucred.Gid = 
uint32(os.Getgid()) + oob := unix.UnixCredentials(&ucred) - scm, err := unix.ParseSocketControlMessage(oob2) - if err != nil { - t.Fatalf("ParseSocketControlMessage: %v", err) - } - newUcred, err := unix.ParseUnixCredentials(&scm[0]) - if err != nil { - t.Fatalf("ParseUnixCredentials: %v", err) - } - if *newUcred != ucred { - t.Fatalf("ParseUnixCredentials = %+v, want %+v", newUcred, ucred) - } + // this is going to send a dummy byte + n, oobn, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil) + if err != nil { + t.Fatalf("WriteMsgUnix: %v", err) + } + if n != 0 { + t.Fatalf("WriteMsgUnix n = %d, want 0", n) + } + if oobn != len(oob) { + t.Fatalf("WriteMsgUnix oobn = %d, want %d", oobn, len(oob)) + } + + oob2 := make([]byte, 10*len(oob)) + n, oobn2, flags, _, err := srv.(*net.UnixConn).ReadMsgUnix(nil, oob2) + if err != nil { + t.Fatalf("ReadMsgUnix: %v", err) + } + if flags != 0 { + t.Fatalf("ReadMsgUnix flags = 0x%x, want 0", flags) + } + if n != 1 { + t.Fatalf("ReadMsgUnix n = %d, want 1 (dummy byte)", n) + } + if oobn2 != oobn { + // without SO_PASSCRED set on the socket, ReadMsgUnix will + // return zero oob bytes + t.Fatalf("ReadMsgUnix oobn = %d, want %d", oobn2, oobn) + } + oob2 = oob2[:oobn2] + if !bytes.Equal(oob, oob2) { + t.Fatal("ReadMsgUnix oob bytes don't match") + } + + scm, err := unix.ParseSocketControlMessage(oob2) + if err != nil { + t.Fatalf("ParseSocketControlMessage: %v", err) + } + newUcred, err := unix.ParseUnixCredentials(&scm[0]) + if err != nil { + t.Fatalf("ParseUnixCredentials: %v", err) + } + if *newUcred != ucred { + t.Fatalf("ParseUnixCredentials = %+v, want %+v", newUcred, ucred) } } diff --git a/vendor/golang.org/x/sys/unix/dev_darwin.go b/vendor/golang.org/x/sys/unix/dev_darwin.go deleted file mode 100644 index 8d1dc0fa3d9..00000000000 --- a/vendor/golang.org/x/sys/unix/dev_darwin.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Functions to access/create device major and minor numbers matching the -// encoding used in Darwin's sys/types.h header. - -package unix - -// Major returns the major component of a Darwin device number. -func Major(dev uint64) uint32 { - return uint32((dev >> 24) & 0xff) -} - -// Minor returns the minor component of a Darwin device number. -func Minor(dev uint64) uint32 { - return uint32(dev & 0xffffff) -} - -// Mkdev returns a Darwin device number generated from the given major and minor -// components. -func Mkdev(major, minor uint32) uint64 { - return (uint64(major) << 24) | uint64(minor) -} diff --git a/vendor/golang.org/x/sys/unix/dev_darwin_test.go b/vendor/golang.org/x/sys/unix/dev_darwin_test.go deleted file mode 100644 index 48d04486038..00000000000 --- a/vendor/golang.org/x/sys/unix/dev_darwin_test.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package unix_test - -import ( - "fmt" - "testing" - - "golang.org/x/sys/unix" -) - -func TestDevices(t *testing.T) { - testCases := []struct { - path string - major uint32 - minor uint32 - }{ - // Most of the device major/minor numbers on Darwin are - // dynamically generated by devfs. These are some well-known - // static numbers. 
- {"/dev/ttyp0", 4, 0}, - {"/dev/ttys0", 4, 48}, - {"/dev/ptyp0", 5, 0}, - {"/dev/ptyr0", 5, 32}, - } - for _, tc := range testCases { - t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) { - var stat unix.Stat_t - err := unix.Stat(tc.path, &stat) - if err != nil { - t.Errorf("failed to stat device: %v", err) - return - } - - dev := uint64(stat.Rdev) - if unix.Major(dev) != tc.major { - t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major) - } - if unix.Minor(dev) != tc.minor { - t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor) - } - if unix.Mkdev(tc.major, tc.minor) != dev { - t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev) - } - }) - } -} diff --git a/vendor/golang.org/x/sys/unix/dev_dragonfly.go b/vendor/golang.org/x/sys/unix/dev_dragonfly.go deleted file mode 100644 index 8502f202ce3..00000000000 --- a/vendor/golang.org/x/sys/unix/dev_dragonfly.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Functions to access/create device major and minor numbers matching the -// encoding used in Dragonfly's sys/types.h header. -// -// The information below is extracted and adapted from sys/types.h: -// -// Minor gives a cookie instead of an index since in order to avoid changing the -// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for -// devices that don't use them. - -package unix - -// Major returns the major component of a DragonFlyBSD device number. -func Major(dev uint64) uint32 { - return uint32((dev >> 8) & 0xff) -} - -// Minor returns the minor component of a DragonFlyBSD device number. -func Minor(dev uint64) uint32 { - return uint32(dev & 0xffff00ff) -} - -// Mkdev returns a DragonFlyBSD device number generated from the given major and -// minor components. -func Mkdev(major, minor uint32) uint64 { - return (uint64(major) << 8) | uint64(minor) -} diff --git a/vendor/golang.org/x/sys/unix/dev_dragonfly_test.go b/vendor/golang.org/x/sys/unix/dev_dragonfly_test.go deleted file mode 100644 index 2caba08a8d5..00000000000 --- a/vendor/golang.org/x/sys/unix/dev_dragonfly_test.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package unix_test - -import ( - "fmt" - "testing" - - "golang.org/x/sys/unix" -) - -func TestDevices(t *testing.T) { - testCases := []struct { - path string - major uint32 - minor uint32 - }{ - // Minor is a cookie instead of an index on DragonFlyBSD - {"/dev/null", 10, 0x00000002}, - {"/dev/random", 10, 0x00000003}, - {"/dev/urandom", 10, 0x00000004}, - {"/dev/zero", 10, 0x0000000c}, - {"/dev/bpf", 15, 0xffff00ff}, - } - for _, tc := range testCases { - t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) { - var stat unix.Stat_t - err := unix.Stat(tc.path, &stat) - if err != nil { - t.Errorf("failed to stat device: %v", err) - return - } - - dev := uint64(stat.Rdev) - if unix.Major(dev) != tc.major { - t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major) - } - if unix.Minor(dev) != tc.minor { - t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor) - } - if unix.Mkdev(tc.major, tc.minor) != dev { - t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev) - } - }) - } -} diff --git a/vendor/golang.org/x/sys/unix/dev_freebsd.go b/vendor/golang.org/x/sys/unix/dev_freebsd.go deleted file mode 100644 index eba3b4bd387..00000000000 --- a/vendor/golang.org/x/sys/unix/dev_freebsd.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Functions to access/create device major and minor numbers matching the -// encoding used in FreeBSD's sys/types.h header. -// -// The information below is extracted and adapted from sys/types.h: -// -// Minor gives a cookie instead of an index since in order to avoid changing the -// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for -// devices that don't use them. - -package unix - -// Major returns the major component of a FreeBSD device number. -func Major(dev uint64) uint32 { - return uint32((dev >> 8) & 0xff) -} - -// Minor returns the minor component of a FreeBSD device number. -func Minor(dev uint64) uint32 { - return uint32(dev & 0xffff00ff) -} - -// Mkdev returns a FreeBSD device number generated from the given major and -// minor components. -func Mkdev(major, minor uint32) uint64 { - return (uint64(major) << 8) | uint64(minor) -} diff --git a/vendor/golang.org/x/sys/unix/dev_linux.go b/vendor/golang.org/x/sys/unix/dev_linux.go deleted file mode 100644 index d165d6f3085..00000000000 --- a/vendor/golang.org/x/sys/unix/dev_linux.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Functions to access/create device major and minor numbers matching the -// encoding used by the Linux kernel and glibc. -// -// The information below is extracted and adapted from bits/sysmacros.h in the -// glibc sources: -// -// dev_t in glibc is 64-bit, with 32-bit major and minor numbers. glibc's -// default encoding is MMMM Mmmm mmmM MMmm, where M is a hex digit of the major -// number and m is a hex digit of the minor number. This is backward compatible -// with legacy systems where dev_t is 16 bits wide, encoded as MMmm. It is also -// backward compatible with the Linux kernel, which for some architectures uses -// 32-bit dev_t, encoded as mmmM MMmm. 
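The header comment just above (from the deleted dev_linux.go) describes glibc's MMMM Mmmm mmmM MMmm encoding of dev_t; the Major/Minor/Mkdev helpers that follow it spell that layout out as bit masks. A small, self-contained worked example follows; the helper functions simply restate the deleted Mkdev/Major/Minor formulas, and the /dev/null numbers (major 1, minor 3) come from the companion dev_linux_test.go table that this patch also removes.

package main

import "fmt"

// mkdev restates the deleted Mkdev formula: glibc's MMMM Mmmm mmmM MMmm layout,
// where M is a hex digit of the major number and m a hex digit of the minor.
func mkdev(major, minor uint32) uint64 {
	dev := (uint64(major) & 0x00000fff) << 8
	dev |= (uint64(major) & 0xfffff000) << 32
	dev |= (uint64(minor) & 0x000000ff) << 0
	dev |= (uint64(minor) & 0xffffff00) << 12
	return dev
}

// majorOf mirrors the deleted Major helper, pulling the split major field back out.
func majorOf(dev uint64) uint32 {
	major := uint32((dev & 0x00000000000fff00) >> 8)
	major |= uint32((dev & 0xfffff00000000000) >> 32)
	return major
}

// minorOf mirrors the deleted Minor helper.
func minorOf(dev uint64) uint32 {
	minor := uint32((dev & 0x00000000000000ff) >> 0)
	minor |= uint32((dev & 0x00000ffffff00000) >> 12)
	return minor
}

func main() {
	// /dev/null is conventionally major 1, minor 3 on Linux.
	dev := mkdev(1, 3)
	fmt.Printf("dev_t = %#x\n", dev) // 0x103: small numbers keep the legacy 16-bit MMmm form
	fmt.Println(majorOf(dev) == 1, minorOf(dev) == 3)

	// Larger components spill into the extended bit ranges and still round-trip.
	big := mkdev(0x12345, 0x6789abc)
	fmt.Println(majorOf(big) == 0x12345, minorOf(big) == 0x6789abc)
}

The same round-trip property is what the deleted per-platform TestDevices functions assert against real device nodes, each with its platform's own bit layout.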
- -package unix - -// Major returns the major component of a Linux device number. -func Major(dev uint64) uint32 { - major := uint32((dev & 0x00000000000fff00) >> 8) - major |= uint32((dev & 0xfffff00000000000) >> 32) - return major -} - -// Minor returns the minor component of a Linux device number. -func Minor(dev uint64) uint32 { - minor := uint32((dev & 0x00000000000000ff) >> 0) - minor |= uint32((dev & 0x00000ffffff00000) >> 12) - return minor -} - -// Mkdev returns a Linux device number generated from the given major and minor -// components. -func Mkdev(major, minor uint32) uint64 { - dev := (uint64(major) & 0x00000fff) << 8 - dev |= (uint64(major) & 0xfffff000) << 32 - dev |= (uint64(minor) & 0x000000ff) << 0 - dev |= (uint64(minor) & 0xffffff00) << 12 - return dev -} diff --git a/vendor/golang.org/x/sys/unix/dev_linux_test.go b/vendor/golang.org/x/sys/unix/dev_linux_test.go deleted file mode 100644 index 6e001f37ae6..00000000000 --- a/vendor/golang.org/x/sys/unix/dev_linux_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package unix_test - -import ( - "fmt" - "testing" - - "golang.org/x/sys/unix" -) - -func TestDevices(t *testing.T) { - testCases := []struct { - path string - major uint32 - minor uint32 - }{ - // well known major/minor numbers according to - // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/admin-guide/devices.txt - {"/dev/null", 1, 3}, - {"/dev/zero", 1, 5}, - {"/dev/random", 1, 8}, - {"/dev/full", 1, 7}, - {"/dev/urandom", 1, 9}, - {"/dev/tty", 5, 0}, - } - for _, tc := range testCases { - t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) { - var stat unix.Stat_t - err := unix.Stat(tc.path, &stat) - if err != nil { - t.Errorf("failed to stat device: %v", err) - return - } - - dev := uint64(stat.Rdev) - if unix.Major(dev) != tc.major { - t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major) - } - if unix.Minor(dev) != tc.minor { - t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor) - } - if unix.Mkdev(tc.major, tc.minor) != dev { - t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev) - } - }) - - } -} diff --git a/vendor/golang.org/x/sys/unix/dev_netbsd.go b/vendor/golang.org/x/sys/unix/dev_netbsd.go deleted file mode 100644 index b4a203d0c54..00000000000 --- a/vendor/golang.org/x/sys/unix/dev_netbsd.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Functions to access/create device major and minor numbers matching the -// encoding used in NetBSD's sys/types.h header. - -package unix - -// Major returns the major component of a NetBSD device number. -func Major(dev uint64) uint32 { - return uint32((dev & 0x000fff00) >> 8) -} - -// Minor returns the minor component of a NetBSD device number. -func Minor(dev uint64) uint32 { - minor := uint32((dev & 0x000000ff) >> 0) - minor |= uint32((dev & 0xfff00000) >> 12) - return minor -} - -// Mkdev returns a NetBSD device number generated from the given major and minor -// components. 
-func Mkdev(major, minor uint32) uint64 { - dev := (uint64(major) << 8) & 0x000fff00 - dev |= (uint64(minor) << 12) & 0xfff00000 - dev |= (uint64(minor) << 0) & 0x000000ff - return dev -} diff --git a/vendor/golang.org/x/sys/unix/dev_netbsd_test.go b/vendor/golang.org/x/sys/unix/dev_netbsd_test.go deleted file mode 100644 index c39a80a48ec..00000000000 --- a/vendor/golang.org/x/sys/unix/dev_netbsd_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package unix_test - -import ( - "fmt" - "testing" - - "golang.org/x/sys/unix" -) - -func TestDevices(t *testing.T) { - testCases := []struct { - path string - major uint32 - minor uint32 - }{ - // well known major/minor numbers according to /dev/MAKEDEV on - // NetBSD 7.0 - {"/dev/null", 2, 2}, - {"/dev/zero", 2, 12}, - {"/dev/ttyp0", 5, 0}, - {"/dev/ttyp1", 5, 1}, - {"/dev/random", 46, 0}, - {"/dev/urandom", 46, 1}, - } - for _, tc := range testCases { - t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) { - var stat unix.Stat_t - err := unix.Stat(tc.path, &stat) - if err != nil { - t.Errorf("failed to stat device: %v", err) - return - } - - dev := uint64(stat.Rdev) - if unix.Major(dev) != tc.major { - t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major) - } - if unix.Minor(dev) != tc.minor { - t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor) - } - if unix.Mkdev(tc.major, tc.minor) != dev { - t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev) - } - }) - } -} diff --git a/vendor/golang.org/x/sys/unix/dev_openbsd.go b/vendor/golang.org/x/sys/unix/dev_openbsd.go deleted file mode 100644 index f3430c42ff0..00000000000 --- a/vendor/golang.org/x/sys/unix/dev_openbsd.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Functions to access/create device major and minor numbers matching the -// encoding used in OpenBSD's sys/types.h header. - -package unix - -// Major returns the major component of an OpenBSD device number. -func Major(dev uint64) uint32 { - return uint32((dev & 0x0000ff00) >> 8) -} - -// Minor returns the minor component of an OpenBSD device number. -func Minor(dev uint64) uint32 { - minor := uint32((dev & 0x000000ff) >> 0) - minor |= uint32((dev & 0xffff0000) >> 8) - return minor -} - -// Mkdev returns an OpenBSD device number generated from the given major and minor -// components. -func Mkdev(major, minor uint32) uint64 { - dev := (uint64(major) << 8) & 0x0000ff00 - dev |= (uint64(minor) << 8) & 0xffff0000 - dev |= (uint64(minor) << 0) & 0x000000ff - return dev -} diff --git a/vendor/golang.org/x/sys/unix/dev_openbsd_test.go b/vendor/golang.org/x/sys/unix/dev_openbsd_test.go deleted file mode 100644 index 5635d271f4f..00000000000 --- a/vendor/golang.org/x/sys/unix/dev_openbsd_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package unix_test - -import ( - "fmt" - "testing" - - "golang.org/x/sys/unix" -) - -func TestDevices(t *testing.T) { - testCases := []struct { - path string - major uint32 - minor uint32 - }{ - // well known major/minor numbers according to /dev/MAKEDEV on - // OpenBSD 6.0 - {"/dev/null", 2, 2}, - {"/dev/zero", 2, 12}, - {"/dev/ttyp0", 5, 0}, - {"/dev/ttyp1", 5, 1}, - {"/dev/random", 45, 0}, - {"/dev/srandom", 45, 1}, - {"/dev/urandom", 45, 2}, - {"/dev/arandom", 45, 3}, - } - for _, tc := range testCases { - t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) { - var stat unix.Stat_t - err := unix.Stat(tc.path, &stat) - if err != nil { - t.Errorf("failed to stat device: %v", err) - return - } - - dev := uint64(stat.Rdev) - if unix.Major(dev) != tc.major { - t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major) - } - if unix.Minor(dev) != tc.minor { - t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor) - } - if unix.Mkdev(tc.major, tc.minor) != dev { - t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev) - } - }) - } -} diff --git a/vendor/golang.org/x/sys/unix/dev_solaris_test.go b/vendor/golang.org/x/sys/unix/dev_solaris_test.go deleted file mode 100644 index db58c0d05e7..00000000000 --- a/vendor/golang.org/x/sys/unix/dev_solaris_test.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package unix_test - -import ( - "fmt" - "testing" - - "golang.org/x/sys/unix" -) - -func TestDevices(t *testing.T) { - testCases := []struct { - path string - major uint32 - minor uint32 - }{ - // Well-known major/minor numbers on OpenSolaris according to - // /etc/name_to_major - {"/dev/zero", 134, 12}, - {"/dev/null", 134, 2}, - {"/dev/ptyp0", 172, 0}, - {"/dev/ttyp0", 175, 0}, - {"/dev/ttyp1", 175, 1}, - } - for _, tc := range testCases { - t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) { - var stat unix.Stat_t - err := unix.Stat(tc.path, &stat) - if err != nil { - t.Errorf("failed to stat device: %v", err) - return - } - - dev := uint64(stat.Rdev) - if unix.Major(dev) != tc.major { - t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major) - } - if unix.Minor(dev) != tc.minor { - t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor) - } - if unix.Mkdev(tc.major, tc.minor) != dev { - t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev) - } - }) - } -} diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go deleted file mode 100644 index c56bc8b05e0..00000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep -// them here for backwards compatibility. 
- -package unix - -const ( - IFF_SMART = 0x20 - IFT_1822 = 0x2 - IFT_A12MPPSWITCH = 0x82 - IFT_AAL2 = 0xbb - IFT_AAL5 = 0x31 - IFT_ADSL = 0x5e - IFT_AFLANE8023 = 0x3b - IFT_AFLANE8025 = 0x3c - IFT_ARAP = 0x58 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ASYNC = 0x54 - IFT_ATM = 0x25 - IFT_ATMDXI = 0x69 - IFT_ATMFUNI = 0x6a - IFT_ATMIMA = 0x6b - IFT_ATMLOGICAL = 0x50 - IFT_ATMRADIO = 0xbd - IFT_ATMSUBINTERFACE = 0x86 - IFT_ATMVCIENDPT = 0xc2 - IFT_ATMVIRTUAL = 0x95 - IFT_BGPPOLICYACCOUNTING = 0xa2 - IFT_BSC = 0x53 - IFT_CCTEMUL = 0x3d - IFT_CEPT = 0x13 - IFT_CES = 0x85 - IFT_CHANNEL = 0x46 - IFT_CNR = 0x55 - IFT_COFFEE = 0x84 - IFT_COMPOSITELINK = 0x9b - IFT_DCN = 0x8d - IFT_DIGITALPOWERLINE = 0x8a - IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba - IFT_DLSW = 0x4a - IFT_DOCSCABLEDOWNSTREAM = 0x80 - IFT_DOCSCABLEMACLAYER = 0x7f - IFT_DOCSCABLEUPSTREAM = 0x81 - IFT_DS0 = 0x51 - IFT_DS0BUNDLE = 0x52 - IFT_DS1FDL = 0xaa - IFT_DS3 = 0x1e - IFT_DTM = 0x8c - IFT_DVBASILN = 0xac - IFT_DVBASIOUT = 0xad - IFT_DVBRCCDOWNSTREAM = 0x93 - IFT_DVBRCCMACLAYER = 0x92 - IFT_DVBRCCUPSTREAM = 0x94 - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_EPLRS = 0x57 - IFT_ESCON = 0x49 - IFT_ETHER = 0x6 - IFT_FAITH = 0xf2 - IFT_FAST = 0x7d - IFT_FASTETHER = 0x3e - IFT_FASTETHERFX = 0x45 - IFT_FDDI = 0xf - IFT_FIBRECHANNEL = 0x38 - IFT_FRAMERELAYINTERCONNECT = 0x3a - IFT_FRAMERELAYMPI = 0x5c - IFT_FRDLCIENDPT = 0xc1 - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_FRF16MFRBUNDLE = 0xa3 - IFT_FRFORWARD = 0x9e - IFT_G703AT2MB = 0x43 - IFT_G703AT64K = 0x42 - IFT_GIF = 0xf0 - IFT_GIGABITETHERNET = 0x75 - IFT_GR303IDT = 0xb2 - IFT_GR303RDT = 0xb1 - IFT_H323GATEKEEPER = 0xa4 - IFT_H323PROXY = 0xa5 - IFT_HDH1822 = 0x3 - IFT_HDLC = 0x76 - IFT_HDSL2 = 0xa8 - IFT_HIPERLAN2 = 0xb7 - IFT_HIPPI = 0x2f - IFT_HIPPIINTERFACE = 0x39 - IFT_HOSTPAD = 0x5a - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IBM370PARCHAN = 0x48 - IFT_IDSL = 0x9a - IFT_IEEE80211 = 0x47 - IFT_IEEE80212 = 0x37 - IFT_IEEE8023ADLAG = 0xa1 - IFT_IFGSN = 0x91 - IFT_IMT = 0xbe - IFT_INTERLEAVE = 0x7c - IFT_IP = 0x7e - IFT_IPFORWARD = 0x8e - IFT_IPOVERATM = 0x72 - IFT_IPOVERCDLC = 0x6d - IFT_IPOVERCLAW = 0x6e - IFT_IPSWITCH = 0x4e - IFT_IPXIP = 0xf9 - IFT_ISDN = 0x3f - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISDNS = 0x4b - IFT_ISDNU = 0x4c - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88025CRFPINT = 0x62 - IFT_ISO88025DTR = 0x56 - IFT_ISO88025FIBER = 0x73 - IFT_ISO88026 = 0xa - IFT_ISUP = 0xb3 - IFT_L3IPXVLAN = 0x89 - IFT_LAPB = 0x10 - IFT_LAPD = 0x4d - IFT_LAPF = 0x77 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MEDIAMAILOVERIP = 0x8b - IFT_MFSIGLINK = 0xa7 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_MPC = 0x71 - IFT_MPLS = 0xa6 - IFT_MPLSTUNNEL = 0x96 - IFT_MSDSL = 0x8f - IFT_MVL = 0xbf - IFT_MYRINET = 0x63 - IFT_NFAS = 0xaf - IFT_NSIP = 0x1b - IFT_OPTICALCHANNEL = 0xc3 - IFT_OPTICALTRANSPORT = 0xc4 - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PFLOG = 0xf6 - IFT_PFSYNC = 0xf7 - IFT_PLC = 0xae - IFT_POS = 0xab - IFT_PPPMULTILINKBUNDLE = 0x6c - IFT_PROPBWAP2MP = 0xb8 - IFT_PROPCNLS = 0x59 - IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 - IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 - IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 - IFT_PROPMUX = 0x36 - IFT_PROPWIRELESSP2P = 0x9d - IFT_PTPSERIAL = 0x16 - IFT_PVC = 0xf1 - IFT_QLLC = 0x44 - IFT_RADIOMAC = 0xbc - IFT_RADSL = 0x5f - IFT_REACHDSL = 0xc0 - IFT_RFC1483 = 0x9f - IFT_RS232 = 0x21 - IFT_RSRB = 0x4f - IFT_SDLC = 0x11 - IFT_SDSL = 0x60 - IFT_SHDSL = 0xa9 - IFT_SIP = 
0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETOVERHEADCHANNEL = 0xb9 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_SRP = 0x97 - IFT_SS7SIGLINK = 0x9c - IFT_STACKTOSTACK = 0x6f - IFT_STARLAN = 0xb - IFT_STF = 0xd7 - IFT_T1 = 0x12 - IFT_TDLC = 0x74 - IFT_TERMPAD = 0x5b - IFT_TR008 = 0xb0 - IFT_TRANSPHDLC = 0x7b - IFT_TUNNEL = 0x83 - IFT_ULTRA = 0x1d - IFT_USB = 0xa0 - IFT_V11 = 0x40 - IFT_V35 = 0x2d - IFT_V36 = 0x41 - IFT_V37 = 0x78 - IFT_VDSL = 0x61 - IFT_VIRTUALIPADDRESS = 0x70 - IFT_VOICEEM = 0x64 - IFT_VOICEENCAP = 0x67 - IFT_VOICEFXO = 0x65 - IFT_VOICEFXS = 0x66 - IFT_VOICEOVERATM = 0x98 - IFT_VOICEOVERFRAMERELAY = 0x99 - IFT_VOICEOVERIP = 0x68 - IFT_X213 = 0x5d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25HUNTGROUP = 0x7a - IFT_X25MLP = 0x79 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - IPPROTO_MAXID = 0x34 - IPV6_FAITH = 0x1d - IP_FAITH = 0x16 - MAP_NORESERVE = 0x40 - MAP_RENAME = 0x20 - NET_RT_MAXID = 0x6 - RTF_PRCLONING = 0x10000 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - SIOCADDRT = 0x8030720a - SIOCALIFADDR = 0x8118691b - SIOCDELRT = 0x8030720b - SIOCDLIFADDR = 0x8118691d - SIOCGLIFADDR = 0xc118691c - SIOCGLIFPHYADDR = 0xc118694b - SIOCSLIFPHYADDR = 0x8118694a -) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go deleted file mode 100644 index 3e9771175ab..00000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep -// them here for backwards compatibility. 
- -package unix - -const ( - IFF_SMART = 0x20 - IFT_1822 = 0x2 - IFT_A12MPPSWITCH = 0x82 - IFT_AAL2 = 0xbb - IFT_AAL5 = 0x31 - IFT_ADSL = 0x5e - IFT_AFLANE8023 = 0x3b - IFT_AFLANE8025 = 0x3c - IFT_ARAP = 0x58 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ASYNC = 0x54 - IFT_ATM = 0x25 - IFT_ATMDXI = 0x69 - IFT_ATMFUNI = 0x6a - IFT_ATMIMA = 0x6b - IFT_ATMLOGICAL = 0x50 - IFT_ATMRADIO = 0xbd - IFT_ATMSUBINTERFACE = 0x86 - IFT_ATMVCIENDPT = 0xc2 - IFT_ATMVIRTUAL = 0x95 - IFT_BGPPOLICYACCOUNTING = 0xa2 - IFT_BSC = 0x53 - IFT_CCTEMUL = 0x3d - IFT_CEPT = 0x13 - IFT_CES = 0x85 - IFT_CHANNEL = 0x46 - IFT_CNR = 0x55 - IFT_COFFEE = 0x84 - IFT_COMPOSITELINK = 0x9b - IFT_DCN = 0x8d - IFT_DIGITALPOWERLINE = 0x8a - IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba - IFT_DLSW = 0x4a - IFT_DOCSCABLEDOWNSTREAM = 0x80 - IFT_DOCSCABLEMACLAYER = 0x7f - IFT_DOCSCABLEUPSTREAM = 0x81 - IFT_DS0 = 0x51 - IFT_DS0BUNDLE = 0x52 - IFT_DS1FDL = 0xaa - IFT_DS3 = 0x1e - IFT_DTM = 0x8c - IFT_DVBASILN = 0xac - IFT_DVBASIOUT = 0xad - IFT_DVBRCCDOWNSTREAM = 0x93 - IFT_DVBRCCMACLAYER = 0x92 - IFT_DVBRCCUPSTREAM = 0x94 - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_EPLRS = 0x57 - IFT_ESCON = 0x49 - IFT_ETHER = 0x6 - IFT_FAITH = 0xf2 - IFT_FAST = 0x7d - IFT_FASTETHER = 0x3e - IFT_FASTETHERFX = 0x45 - IFT_FDDI = 0xf - IFT_FIBRECHANNEL = 0x38 - IFT_FRAMERELAYINTERCONNECT = 0x3a - IFT_FRAMERELAYMPI = 0x5c - IFT_FRDLCIENDPT = 0xc1 - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_FRF16MFRBUNDLE = 0xa3 - IFT_FRFORWARD = 0x9e - IFT_G703AT2MB = 0x43 - IFT_G703AT64K = 0x42 - IFT_GIF = 0xf0 - IFT_GIGABITETHERNET = 0x75 - IFT_GR303IDT = 0xb2 - IFT_GR303RDT = 0xb1 - IFT_H323GATEKEEPER = 0xa4 - IFT_H323PROXY = 0xa5 - IFT_HDH1822 = 0x3 - IFT_HDLC = 0x76 - IFT_HDSL2 = 0xa8 - IFT_HIPERLAN2 = 0xb7 - IFT_HIPPI = 0x2f - IFT_HIPPIINTERFACE = 0x39 - IFT_HOSTPAD = 0x5a - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IBM370PARCHAN = 0x48 - IFT_IDSL = 0x9a - IFT_IEEE80211 = 0x47 - IFT_IEEE80212 = 0x37 - IFT_IEEE8023ADLAG = 0xa1 - IFT_IFGSN = 0x91 - IFT_IMT = 0xbe - IFT_INTERLEAVE = 0x7c - IFT_IP = 0x7e - IFT_IPFORWARD = 0x8e - IFT_IPOVERATM = 0x72 - IFT_IPOVERCDLC = 0x6d - IFT_IPOVERCLAW = 0x6e - IFT_IPSWITCH = 0x4e - IFT_IPXIP = 0xf9 - IFT_ISDN = 0x3f - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISDNS = 0x4b - IFT_ISDNU = 0x4c - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88025CRFPINT = 0x62 - IFT_ISO88025DTR = 0x56 - IFT_ISO88025FIBER = 0x73 - IFT_ISO88026 = 0xa - IFT_ISUP = 0xb3 - IFT_L3IPXVLAN = 0x89 - IFT_LAPB = 0x10 - IFT_LAPD = 0x4d - IFT_LAPF = 0x77 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MEDIAMAILOVERIP = 0x8b - IFT_MFSIGLINK = 0xa7 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_MPC = 0x71 - IFT_MPLS = 0xa6 - IFT_MPLSTUNNEL = 0x96 - IFT_MSDSL = 0x8f - IFT_MVL = 0xbf - IFT_MYRINET = 0x63 - IFT_NFAS = 0xaf - IFT_NSIP = 0x1b - IFT_OPTICALCHANNEL = 0xc3 - IFT_OPTICALTRANSPORT = 0xc4 - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PFLOG = 0xf6 - IFT_PFSYNC = 0xf7 - IFT_PLC = 0xae - IFT_POS = 0xab - IFT_PPPMULTILINKBUNDLE = 0x6c - IFT_PROPBWAP2MP = 0xb8 - IFT_PROPCNLS = 0x59 - IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 - IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 - IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 - IFT_PROPMUX = 0x36 - IFT_PROPWIRELESSP2P = 0x9d - IFT_PTPSERIAL = 0x16 - IFT_PVC = 0xf1 - IFT_QLLC = 0x44 - IFT_RADIOMAC = 0xbc - IFT_RADSL = 0x5f - IFT_REACHDSL = 0xc0 - IFT_RFC1483 = 0x9f - IFT_RS232 = 0x21 - IFT_RSRB = 0x4f - IFT_SDLC = 0x11 - IFT_SDSL = 0x60 - IFT_SHDSL = 0xa9 - IFT_SIP = 
0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETOVERHEADCHANNEL = 0xb9 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_SRP = 0x97 - IFT_SS7SIGLINK = 0x9c - IFT_STACKTOSTACK = 0x6f - IFT_STARLAN = 0xb - IFT_STF = 0xd7 - IFT_T1 = 0x12 - IFT_TDLC = 0x74 - IFT_TERMPAD = 0x5b - IFT_TR008 = 0xb0 - IFT_TRANSPHDLC = 0x7b - IFT_TUNNEL = 0x83 - IFT_ULTRA = 0x1d - IFT_USB = 0xa0 - IFT_V11 = 0x40 - IFT_V35 = 0x2d - IFT_V36 = 0x41 - IFT_V37 = 0x78 - IFT_VDSL = 0x61 - IFT_VIRTUALIPADDRESS = 0x70 - IFT_VOICEEM = 0x64 - IFT_VOICEENCAP = 0x67 - IFT_VOICEFXO = 0x65 - IFT_VOICEFXS = 0x66 - IFT_VOICEOVERATM = 0x98 - IFT_VOICEOVERFRAMERELAY = 0x99 - IFT_VOICEOVERIP = 0x68 - IFT_X213 = 0x5d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25HUNTGROUP = 0x7a - IFT_X25MLP = 0x79 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - IPPROTO_MAXID = 0x34 - IPV6_FAITH = 0x1d - IP_FAITH = 0x16 - MAP_NORESERVE = 0x40 - MAP_RENAME = 0x20 - NET_RT_MAXID = 0x6 - RTF_PRCLONING = 0x10000 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - SIOCADDRT = 0x8040720a - SIOCALIFADDR = 0x8118691b - SIOCDELRT = 0x8040720b - SIOCDLIFADDR = 0x8118691d - SIOCGLIFADDR = 0xc118691c - SIOCGLIFPHYADDR = 0xc118694b - SIOCSLIFPHYADDR = 0x8118694a -) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go deleted file mode 100644 index 856dca32543..00000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package unix - -const ( - IFT_1822 = 0x2 - IFT_A12MPPSWITCH = 0x82 - IFT_AAL2 = 0xbb - IFT_AAL5 = 0x31 - IFT_ADSL = 0x5e - IFT_AFLANE8023 = 0x3b - IFT_AFLANE8025 = 0x3c - IFT_ARAP = 0x58 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ASYNC = 0x54 - IFT_ATM = 0x25 - IFT_ATMDXI = 0x69 - IFT_ATMFUNI = 0x6a - IFT_ATMIMA = 0x6b - IFT_ATMLOGICAL = 0x50 - IFT_ATMRADIO = 0xbd - IFT_ATMSUBINTERFACE = 0x86 - IFT_ATMVCIENDPT = 0xc2 - IFT_ATMVIRTUAL = 0x95 - IFT_BGPPOLICYACCOUNTING = 0xa2 - IFT_BSC = 0x53 - IFT_CCTEMUL = 0x3d - IFT_CEPT = 0x13 - IFT_CES = 0x85 - IFT_CHANNEL = 0x46 - IFT_CNR = 0x55 - IFT_COFFEE = 0x84 - IFT_COMPOSITELINK = 0x9b - IFT_DCN = 0x8d - IFT_DIGITALPOWERLINE = 0x8a - IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba - IFT_DLSW = 0x4a - IFT_DOCSCABLEDOWNSTREAM = 0x80 - IFT_DOCSCABLEMACLAYER = 0x7f - IFT_DOCSCABLEUPSTREAM = 0x81 - IFT_DS0 = 0x51 - IFT_DS0BUNDLE = 0x52 - IFT_DS1FDL = 0xaa - IFT_DS3 = 0x1e - IFT_DTM = 0x8c - IFT_DVBASILN = 0xac - IFT_DVBASIOUT = 0xad - IFT_DVBRCCDOWNSTREAM = 0x93 - IFT_DVBRCCMACLAYER = 0x92 - IFT_DVBRCCUPSTREAM = 0x94 - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_EPLRS = 0x57 - IFT_ESCON = 0x49 - IFT_ETHER = 0x6 - IFT_FAST = 0x7d - IFT_FASTETHER = 0x3e - IFT_FASTETHERFX = 0x45 - IFT_FDDI = 0xf - IFT_FIBRECHANNEL = 0x38 - IFT_FRAMERELAYINTERCONNECT = 0x3a - IFT_FRAMERELAYMPI = 0x5c - IFT_FRDLCIENDPT = 0xc1 - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_FRF16MFRBUNDLE = 0xa3 - IFT_FRFORWARD = 0x9e - IFT_G703AT2MB = 0x43 - IFT_G703AT64K = 0x42 - IFT_GIF = 0xf0 - IFT_GIGABITETHERNET = 0x75 - IFT_GR303IDT = 0xb2 - IFT_GR303RDT = 0xb1 - IFT_H323GATEKEEPER = 0xa4 - IFT_H323PROXY = 0xa5 - IFT_HDH1822 = 0x3 - IFT_HDLC = 0x76 - IFT_HDSL2 = 0xa8 - IFT_HIPERLAN2 = 0xb7 - IFT_HIPPI = 0x2f - IFT_HIPPIINTERFACE = 0x39 - IFT_HOSTPAD = 0x5a - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IBM370PARCHAN = 0x48 - IFT_IDSL = 0x9a - IFT_IEEE80211 = 0x47 - 
IFT_IEEE80212 = 0x37 - IFT_IEEE8023ADLAG = 0xa1 - IFT_IFGSN = 0x91 - IFT_IMT = 0xbe - IFT_INTERLEAVE = 0x7c - IFT_IP = 0x7e - IFT_IPFORWARD = 0x8e - IFT_IPOVERATM = 0x72 - IFT_IPOVERCDLC = 0x6d - IFT_IPOVERCLAW = 0x6e - IFT_IPSWITCH = 0x4e - IFT_ISDN = 0x3f - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISDNS = 0x4b - IFT_ISDNU = 0x4c - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88025CRFPINT = 0x62 - IFT_ISO88025DTR = 0x56 - IFT_ISO88025FIBER = 0x73 - IFT_ISO88026 = 0xa - IFT_ISUP = 0xb3 - IFT_L3IPXVLAN = 0x89 - IFT_LAPB = 0x10 - IFT_LAPD = 0x4d - IFT_LAPF = 0x77 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MEDIAMAILOVERIP = 0x8b - IFT_MFSIGLINK = 0xa7 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_MPC = 0x71 - IFT_MPLS = 0xa6 - IFT_MPLSTUNNEL = 0x96 - IFT_MSDSL = 0x8f - IFT_MVL = 0xbf - IFT_MYRINET = 0x63 - IFT_NFAS = 0xaf - IFT_NSIP = 0x1b - IFT_OPTICALCHANNEL = 0xc3 - IFT_OPTICALTRANSPORT = 0xc4 - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PFLOG = 0xf6 - IFT_PFSYNC = 0xf7 - IFT_PLC = 0xae - IFT_POS = 0xab - IFT_PPPMULTILINKBUNDLE = 0x6c - IFT_PROPBWAP2MP = 0xb8 - IFT_PROPCNLS = 0x59 - IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 - IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 - IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 - IFT_PROPMUX = 0x36 - IFT_PROPWIRELESSP2P = 0x9d - IFT_PTPSERIAL = 0x16 - IFT_PVC = 0xf1 - IFT_QLLC = 0x44 - IFT_RADIOMAC = 0xbc - IFT_RADSL = 0x5f - IFT_REACHDSL = 0xc0 - IFT_RFC1483 = 0x9f - IFT_RS232 = 0x21 - IFT_RSRB = 0x4f - IFT_SDLC = 0x11 - IFT_SDSL = 0x60 - IFT_SHDSL = 0xa9 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETOVERHEADCHANNEL = 0xb9 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_SRP = 0x97 - IFT_SS7SIGLINK = 0x9c - IFT_STACKTOSTACK = 0x6f - IFT_STARLAN = 0xb - IFT_STF = 0xd7 - IFT_T1 = 0x12 - IFT_TDLC = 0x74 - IFT_TERMPAD = 0x5b - IFT_TR008 = 0xb0 - IFT_TRANSPHDLC = 0x7b - IFT_TUNNEL = 0x83 - IFT_ULTRA = 0x1d - IFT_USB = 0xa0 - IFT_V11 = 0x40 - IFT_V35 = 0x2d - IFT_V36 = 0x41 - IFT_V37 = 0x78 - IFT_VDSL = 0x61 - IFT_VIRTUALIPADDRESS = 0x70 - IFT_VOICEEM = 0x64 - IFT_VOICEENCAP = 0x67 - IFT_VOICEFXO = 0x65 - IFT_VOICEFXS = 0x66 - IFT_VOICEOVERATM = 0x98 - IFT_VOICEOVERFRAMERELAY = 0x99 - IFT_VOICEOVERIP = 0x68 - IFT_X213 = 0x5d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25HUNTGROUP = 0x7a - IFT_X25MLP = 0x79 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - - // missing constants on FreeBSD-11.1-RELEASE, copied from old values in ztypes_freebsd_arm.go - IFF_SMART = 0x20 - IFT_FAITH = 0xf2 - IFT_IPXIP = 0xf9 - IPPROTO_MAXID = 0x34 - IPV6_FAITH = 0x1d - IP_FAITH = 0x16 - MAP_NORESERVE = 0x40 - MAP_RENAME = 0x20 - NET_RT_MAXID = 0x6 - RTF_PRCLONING = 0x10000 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - SIOCADDRT = 0x8030720a - SIOCALIFADDR = 0x8118691b - SIOCDELRT = 0x8030720b - SIOCDLIFADDR = 0x8118691d - SIOCGLIFADDR = 0xc118691c - SIOCGLIFPHYADDR = 0xc118694b - SIOCSLIFPHYADDR = 0x8118694a -) diff --git a/vendor/golang.org/x/sys/unix/file_unix.go b/vendor/golang.org/x/sys/unix/file_unix.go deleted file mode 100644 index 47f6a83f216..00000000000 --- a/vendor/golang.org/x/sys/unix/file_unix.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package unix - -import ( - "os" - "syscall" -) - -// FIXME: unexported function from os -// syscallMode returns the syscall-specific mode bits from Go's portable mode bits. -func syscallMode(i os.FileMode) (o uint32) { - o |= uint32(i.Perm()) - if i&os.ModeSetuid != 0 { - o |= syscall.S_ISUID - } - if i&os.ModeSetgid != 0 { - o |= syscall.S_ISGID - } - if i&os.ModeSticky != 0 { - o |= syscall.S_ISVTX - } - // No mapping for Go's ModeTemporary (plan9 only). - return -} diff --git a/vendor/golang.org/x/sys/unix/flock.go b/vendor/golang.org/x/sys/unix/flock.go index 2994ce75f20..ce67a59528a 100644 --- a/vendor/golang.org/x/sys/unix/flock.go +++ b/vendor/golang.org/x/sys/unix/flock.go @@ -1,3 +1,5 @@ +// +build linux darwin freebsd openbsd netbsd dragonfly + // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go new file mode 100644 index 00000000000..56332692c42 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go @@ -0,0 +1,20 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gccgo,linux,sparc64 + +package unix + +import "syscall" + +//extern sysconf +func realSysconf(name int) int64 + +func sysconf(name int) (n int64, err syscall.Errno) { + r := realSysconf(name) + if r < 0 { + return 0, syscall.GetErrno() + } + return r, 0 +} diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index c3a08092687..f0d6566f205 100755 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -72,7 +72,7 @@ darwin_amd64) ;; darwin_arm) mkerrors="$mkerrors" - mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h" + mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; darwin_arm64) @@ -108,7 +108,7 @@ freebsd_arm) mksyscall="./mksyscall.pl -l32 -arm" mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl" # Let the type of C char be signed for making the bare syscall - # API consistent across platforms. + # API consistent across over platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; linux_sparc64) @@ -130,14 +130,6 @@ netbsd_amd64) mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; -netbsd_arm) - mkerrors="$mkerrors" - mksyscall="./mksyscall.pl -l32 -netbsd -arm" - mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl" - # Let the type of C char be signed for making the bare syscall - # API consistent across platforms. 
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" - ;; openbsd_386) mkerrors="$mkerrors -m32" mksyscall="./mksyscall.pl -l32 -openbsd" @@ -154,16 +146,6 @@ openbsd_amd64) mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; -openbsd_arm) - mkerrors="$mkerrors" - mksyscall="./mksyscall.pl -l32 -openbsd -arm" - mksysctl="./mksysctl_openbsd.pl" - zsysctl="zsysctl_openbsd.go" - mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" - # Let the type of C char be signed for making the bare syscall - # API consistent across platforms. - mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" - ;; solaris_amd64) mksyscall="./mksyscall_solaris.pl" mkerrors="$mkerrors -m64" diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index f9f5e5691ce..5da82241721 100755 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -17,8 +17,8 @@ if test -z "$GOARCH" -o -z "$GOOS"; then fi # Check that we are using the new build system if we should -if [[ "$GOOS" = "linux" ]] && [[ "$GOARCH" != "sparc64" ]]; then - if [[ "$GOLANG_SYS_BUILD" != "docker" ]]; then +if [[ "$GOOS" -eq "linux" ]] && [[ "$GOARCH" != "sparc64" ]]; then + if [[ "$GOLANG_SYS_BUILD" -ne "docker" ]]; then echo 1>&2 "In the new build system, mkerrors should not be called directly." echo 1>&2 "See README.md" exit 1 @@ -27,7 +27,7 @@ fi CC=${CC:-cc} -if [[ "$GOOS" = "solaris" ]]; then +if [[ "$GOOS" -eq "solaris" ]]; then # Assumes GNU versions of utilities in PATH. export PATH=/usr/gnu/bin:$PATH fi @@ -45,7 +45,6 @@ includes_Darwin=' #include #include #include -#include #include #include #include @@ -76,7 +75,6 @@ includes_DragonFly=' ' includes_FreeBSD=' -#include #include #include #include @@ -145,7 +143,6 @@ struct ltchars { #include #include -#include #include #include #include @@ -155,7 +152,6 @@ struct ltchars { #include #include #include -#include #include #include #include @@ -168,21 +164,16 @@ struct ltchars { #include #include #include -#include #include #include #include #include #include -#include -#include #include #include #include #include #include -#include -#include #include #include @@ -283,7 +274,6 @@ includes_SunOS=' #include #include #include -#include #include #include #include @@ -348,7 +338,6 @@ ccflags="$@" $2 !~ /^EXPR_/ && $2 ~ /^E[A-Z0-9_]+$/ || $2 ~ /^B[0-9_]+$/ || - $2 ~ /^(OLD|NEW)DEV$/ || $2 == "BOTHER" || $2 ~ /^CI?BAUD(EX)?$/ || $2 == "IBSHIFT" || @@ -384,9 +373,9 @@ ccflags="$@" $2 == "IFNAMSIZ" || $2 ~ /^CTL_(MAXNAME|NET|QUERY)$/ || $2 ~ /^SYSCTL_VERS/ || - $2 ~ /^(MS|MNT|UMOUNT)_/ || + $2 ~ /^(MS|MNT)_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || - $2 ~ /^(O|F|E?FD|NAME|S|PTRACE|PT)_/ || + $2 ~ /^(O|F|FD|NAME|S|PTRACE|PT)_/ || $2 ~ /^LINUX_REBOOT_CMD_/ || $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ || $2 !~ "NLA_TYPE_MASK" && @@ -400,26 +389,20 @@ ccflags="$@" $2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ || $2 ~ /^BIOC/ || $2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ || - $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ || + $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|NOFILE|STACK)|RLIM_INFINITY/ || $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || $2 ~ /^CLONE_[A-Z_]+/ || $2 !~ /^(BPF_TIMEVAL)$/ && $2 ~ /^(BPF|DLT)_/ || $2 ~ /^CLOCK_/ || $2 ~ /^CAN_/ || - $2 ~ /^CAP_/ || $2 
~ /^ALG_/ || $2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE|IOC_(GET|SET)_ENCRYPTION)/ || $2 ~ /^GRND_/ || $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || - $2 ~ /^PERF_EVENT_IOC_/ || - $2 ~ /^SECCOMP_MODE_/ || $2 ~ /^SPLICE_/ || $2 ~ /^(VM|VMADDR)_/ || - $2 ~ /^(TASKSTATS|TS)_/ || - $2 ~ /^GENL_/ || - $2 ~ /^XATTR_(CREATE|REPLACE)/ || $2 !~ "WMESGLEN" && $2 ~ /^W[A-Z0-9]+$/ || $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} diff --git a/vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl b/vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl index a0a22bf560e..c83064fe3f8 100755 --- a/vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl +++ b/vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl @@ -40,8 +40,21 @@ package unix if($name eq 'SYS_SYS_EXIT'){ $name = 'SYS_EXIT'; } + if($name =~ /^SYS_CAP_+/ || $name =~ /^SYS___CAP_+/){ + next + } print " $name = $num; // $proto\n"; + + # We keep Capsicum syscall numbers for FreeBSD + # 9-STABLE here because we are not sure whether they + # are mature and stable. + if($num == 513){ + print " SYS_CAP_NEW = 514 // { int cap_new(int fd, uint64_t rights); }\n"; + print " SYS_CAP_GETRIGHTS = 515 // { int cap_getrights(int fd, \\\n"; + print " SYS_CAP_ENTER = 516 // { int cap_enter(void); }\n"; + print " SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); }\n"; + } } } diff --git a/vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl b/vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl index 85988b140b9..d31f2c48fb6 100755 --- a/vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl +++ b/vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl @@ -47,7 +47,7 @@ package unix $name = "$7_$11" if $11 ne ''; $name =~ y/a-z/A-Z/; - if($compat eq '' || $compat eq '13' || $compat eq '30' || $compat eq '50') { + if($compat eq '' || $compat eq '30' || $compat eq '50') { print " $name = $num; // $proto\n"; } } diff --git a/vendor/golang.org/x/sys/unix/mmap_unix_test.go b/vendor/golang.org/x/sys/unix/mmap_unix_test.go index 8de55feaf23..18ccec05f13 100644 --- a/vendor/golang.org/x/sys/unix/mmap_unix_test.go +++ b/vendor/golang.org/x/sys/unix/mmap_unix_test.go @@ -17,18 +17,6 @@ func TestMmap(t *testing.T) { if err != nil { t.Fatalf("Mmap: %v", err) } - if err := unix.Mprotect(b, unix.PROT_WRITE); err != nil { - t.Fatalf("Mprotect: %v", err) - } - - b[0] = 42 - - if err := unix.Msync(b, unix.MS_SYNC); err != nil { - t.Fatalf("Msync: %v", err) - } - if err := unix.Madvise(b, unix.MADV_DONTNEED); err != nil { - t.Fatalf("Madvise: %v", err) - } if err := unix.Munmap(b); err != nil { t.Fatalf("Munmap: %v", err) } diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go deleted file mode 100644 index 45afcf72d6a..00000000000 --- a/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -// For Unix, get the pagesize from the runtime. 
- -package unix - -import "syscall" - -func Getpagesize() int { - return syscall.Getpagesize() -} diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index c2846b32d6c..ccb29c75c46 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -561,19 +561,13 @@ func Utimes(path string, tv []Timeval) error { func UtimesNano(path string, ts []Timespec) error { if ts == nil { - err := utimensat(AT_FDCWD, path, nil, 0) - if err != ENOSYS { - return err - } return utimes(path, nil) } + // TODO: The BSDs can do utimensat with SYS_UTIMENSAT but it + // isn't supported by darwin so this uses utimes instead if len(ts) != 2 { return EINVAL } - err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) - if err != ENOSYS { - return err - } // Not as efficient as it could be because Timespec and // Timeval have different types in the different OSes tv := [2]Timeval{ @@ -583,16 +577,6 @@ func UtimesNano(path string, ts []Timespec) error { return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } -func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error { - if ts == nil { - return utimensat(dirfd, path, nil, flags) - } - if len(ts) != 2 { - return EINVAL - } - return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) -} - //sys futimes(fd int, timeval *[2]Timeval) (err error) func Futimes(fd int, tv []Timeval) error { @@ -610,6 +594,9 @@ func Futimes(fd int, tv []Timeval) error { // TODO: wrap // Acct(name nil-string) (err error) // Gethostuuid(uuid *byte, timeout *Timespec) (err error) +// Madvise(addr *byte, len int, behav int) (err error) +// Mprotect(addr *byte, len int, prot int) (err error) +// Msync(addr *byte, len int, flags int) (err error) // Ptrace(req int, pid int, addr uintptr, data int) (ret uintptr, err error) var mapper = &mmapper{ @@ -625,11 +612,3 @@ func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, e func Munmap(b []byte) (err error) { return mapper.Munmap(b) } - -//sys Madvise(b []byte, behav int) (err error) -//sys Mlock(b []byte) (err error) -//sys Mlockall(flags int) (err error) -//sys Mprotect(b []byte, prot int) (err error) -//sys Msync(b []byte, flags int) (err error) -//sys Munlock(b []byte) (err error) -//sys Munlockall() (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index ad74a11fb3e..7d91ac02ac5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -187,11 +187,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) error { - // Darwin doesn't support SYS_UTIMENSAT - return ENOSYS -} - /* * Wrapped */ @@ -200,45 +195,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) error { func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) } -//sys ioctl(fd int, req uint, arg uintptr) (err error) - -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. 
-func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func IoctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func IoctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - /* * Exposed directly */ @@ -254,13 +210,10 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { //sys Dup2(from int, to int) (err error) //sys Exchangedata(path1 string, path2 string, options int) (err error) //sys Exit(code int) -//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchdir(fd int) (err error) //sys Fchflags(fd int, flags int) (err error) //sys Fchmod(fd int, mode uint32) (err error) -//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) -//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 @@ -285,23 +238,23 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { //sys Kqueue() (fd int, err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path string, link string) (err error) -//sys Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) //sys Listen(s int, backlog int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys Mkdir(path string, mode uint32) (err error) -//sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mlock(b []byte) (err error) +//sys Mlockall(flags int) (err error) +//sys Mprotect(b []byte, prot int) (err error) +//sys Munlock(b []byte) (err error) +//sys Munlockall() (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) -//sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) //sys Pread(fd int, p []byte, offset int64) (n int, err error) //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) -//sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) //sys Rename(from string, to string) (err error) -//sys Renameat(fromfd int, from string, tofd int, to string) (err error) //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK @@ -322,13 +275,11 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Statfs(path string, 
stat *Statfs_t) (err error) = SYS_STATFS64 //sys Symlink(path string, link string) (err error) -//sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) //sys Sync() (err error) //sys Truncate(path string, length int64) (err error) //sys Umask(newmask int) (oldmask int) //sys Undelete(path string) (err error) //sys Unlink(path string) (err error) -//sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Unmount(path string, flags int) (err error) //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) @@ -368,6 +319,9 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { // Add_profil // Kdebug_trace // Sigreturn +// Mmap +// Mlock +// Munlock // Atsocket // Kqueue_from_portset_np // Kqueue_portset @@ -460,6 +414,8 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { // Lio_listio // __pthread_cond_wait // Iopolicysys +// Mlockall +// Munlockall // __pthread_kill // __pthread_sigmask // __sigwait @@ -513,6 +469,7 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { // Sendmsg_nocancel // Recvfrom_nocancel // Accept_nocancel +// Msync_nocancel // Fcntl_nocancel // Select_nocancel // Fsync_nocancel diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go index 76634f7ab14..c172a3da5a3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -11,6 +11,8 @@ import ( "unsafe" ) +func Getpagesize() int { return 4096 } + func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } func NsecToTimespec(nsec int64) (ts Timespec) { diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index 7be02dab9d9..fc1e5a4a825 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -11,6 +11,10 @@ import ( "unsafe" ) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) + +func Getpagesize() int { return 4096 } + func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } func NsecToTimespec(nsec int64) (ts Timespec) { diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go index 26b66972f01..d286cf408d8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -9,6 +9,8 @@ import ( "unsafe" ) +func Getpagesize() int { return 4096 } + func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } func NsecToTimespec(nsec int64) (ts Timespec) { diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index 4d67a874272..c33905cdcd9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -11,6 +11,8 @@ import ( "unsafe" ) +func Getpagesize() int { return 16384 } + func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } func NsecToTimespec(nsec int64) (ts Timespec) { diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 3a483373dce..7e0210fc951 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go 
@@ -174,6 +174,11 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { //sys Mkdir(path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mlock(b []byte) (err error) +//sys Mlockall(flags int) (err error) +//sys Mprotect(b []byte, prot int) (err error) +//sys Munlock(b []byte) (err error) +//sys Munlockall() (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) @@ -213,7 +218,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { //sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ //sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) -//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) /* * Unimplemented @@ -248,6 +252,9 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { // Add_profil // Kdebug_trace // Sigreturn +// Mmap +// Mlock +// Munlock // Atsocket // Kqueue_from_portset_np // Kqueue_portset @@ -340,6 +347,8 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { // Lio_listio // __pthread_cond_wait // Iopolicysys +// Mlockall +// Munlockall // __pthread_kill // __pthread_sigmask // __sigwait @@ -392,6 +401,7 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { // Sendmsg_nocancel // Recvfrom_nocancel // Accept_nocancel +// Msync_nocancel // Fcntl_nocancel // Select_nocancel // Fsync_nocancel diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index 6d8952d5a11..da7cb7982cd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -11,6 +11,8 @@ import ( "unsafe" ) +func Getpagesize() int { return 4096 } + func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } func NsecToTimespec(nsec int64) (ts Timespec) { diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index d26e52eaefe..077d1f39ac0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -352,53 +352,11 @@ func Llistxattr(link string, dest []byte) (sz int, err error) { return s, e } -//sys ioctl(fd int, req uint, arg uintptr) (err error) - -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func IoctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func IoctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. 
-func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - /* * Exposed directly */ //sys Access(path string, mode uint32) (err error) //sys Adjtime(delta *Timeval, olddelta *Timeval) (err error) -//sys CapEnter() (err error) -//sys capRightsGet(version int, fd int, rightsp *CapRights) (err error) = SYS___CAP_RIGHTS_GET -//sys capRightsLimit(fd int, rightsp *CapRights) (err error) //sys Chdir(path string) (err error) //sys Chflags(path string, flags int) (err error) //sys Chmod(path string, mode uint32) (err error) @@ -421,13 +379,10 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { //sys ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) //sys ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_POSIX_FADVISE -//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchdir(fd int) (err error) //sys Fchflags(fd int, flags int) (err error) //sys Fchmod(fd int, mode uint32) (err error) -//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) -//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) @@ -454,24 +409,24 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { //sys Kqueue() (fd int, err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path string, link string) (err error) -//sys Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) //sys Listen(s int, backlog int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) //sys Mkdir(path string, mode uint32) (err error) -//sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mlock(b []byte) (err error) +//sys Mlockall(flags int) (err error) +//sys Mprotect(b []byte, prot int) (err error) +//sys Munlock(b []byte) (err error) +//sys Munlockall() (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) -//sys Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) //sys Pread(fd int, p []byte, offset int64) (n int, err error) //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) -//sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) //sys Rename(from string, to string) (err error) -//sys Renameat(fromfd int, from string, tofd int, to string) (err error) //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK @@ -493,13 +448,11 @@ 
func IoctlGetTermios(fd int, req uint) (*Termios, error) { //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) //sys Symlink(path string, link string) (err error) -//sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) //sys Sync() (err error) //sys Truncate(path string, length int64) (err error) //sys Umask(newmask int) (oldmask int) //sys Undelete(path string) (err error) //sys Unlink(path string) (err error) -//sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Unmount(path string, flags int) (err error) //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) @@ -507,7 +460,6 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { //sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ //sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) -//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) /* * Unimplemented @@ -541,6 +493,9 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { // Add_profil // Kdebug_trace // Sigreturn +// Mmap +// Mlock +// Munlock // Atsocket // Kqueue_from_portset_np // Kqueue_portset @@ -633,6 +588,8 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { // Lio_listio // __pthread_cond_wait // Iopolicysys +// Mlockall +// Munlockall // __pthread_kill // __pthread_sigmask // __sigwait @@ -685,6 +642,7 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { // Sendmsg_nocancel // Recvfrom_nocancel // Accept_nocancel +// Msync_nocancel // Fcntl_nocancel // Select_nocancel // Fsync_nocancel diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 4cf5f453f52..6a0cd804d88 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -11,6 +11,8 @@ import ( "unsafe" ) +func Getpagesize() int { return 4096 } + func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } func NsecToTimespec(nsec int64) (ts Timespec) { diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index b8036e7268e..e142540efa4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -11,6 +11,8 @@ import ( "unsafe" ) +func Getpagesize() int { return 4096 } + func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } func NsecToTimespec(nsec int64) (ts Timespec) { diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 5a3bb6a154a..5504cb12559 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -11,6 +11,8 @@ import ( "unsafe" ) +func Getpagesize() int { return 4096 } + func TimespecToNsec(ts Timespec) int64 { return ts.Sec*1e9 + int64(ts.Nsec) } func NsecToTimespec(nsec int64) (ts Timespec) { diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_test.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_test.go index 654439e02b6..3c3d8258693 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_test.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_test.go @@ 
-7,291 +7,14 @@ package unix_test import ( - "flag" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "runtime" "testing" "golang.org/x/sys/unix" ) func TestSysctlUint64(t *testing.T) { - _, err := unix.SysctlUint64("vm.swap_total") + _, err := unix.SysctlUint64("security.mac.labeled") if err != nil { t.Fatal(err) } } - -// FIXME: Infrastructure for launching tests in subprocesses stolen from openbsd_test.go - refactor? -// testCmd generates a proper command that, when executed, runs the test -// corresponding to the given key. - -type testProc struct { - fn func() // should always exit instead of returning - arg func(t *testing.T) string // generate argument for test - cleanup func(arg string) error // for instance, delete coredumps from testing pledge - success bool // whether zero-exit means success or failure -} - -var ( - testProcs = map[string]testProc{} - procName = "" - procArg = "" -) - -const ( - optName = "sys-unix-internal-procname" - optArg = "sys-unix-internal-arg" -) - -func init() { - flag.StringVar(&procName, optName, "", "internal use only") - flag.StringVar(&procArg, optArg, "", "internal use only") - -} - -func testCmd(procName string, procArg string) (*exec.Cmd, error) { - exe, err := filepath.Abs(os.Args[0]) - if err != nil { - return nil, err - } - cmd := exec.Command(exe, "-"+optName+"="+procName, "-"+optArg+"="+procArg) - cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr - return cmd, nil -} - -// ExitsCorrectly is a comprehensive, one-line-of-use wrapper for testing -// a testProc with a key. -func ExitsCorrectly(t *testing.T, procName string) { - s := testProcs[procName] - arg := "-" - if s.arg != nil { - arg = s.arg(t) - } - c, err := testCmd(procName, arg) - defer func(arg string) { - if err := s.cleanup(arg); err != nil { - t.Fatalf("Failed to run cleanup for %s %s %#v", procName, err, err) - } - }(arg) - if err != nil { - t.Fatalf("Failed to construct command for %s", procName) - } - if (c.Run() == nil) != s.success { - result := "succeed" - if !s.success { - result = "fail" - } - t.Fatalf("Process did not %s when it was supposed to", result) - } -} - -func TestMain(m *testing.M) { - flag.Parse() - if procName != "" { - t := testProcs[procName] - t.fn() - os.Stderr.WriteString("test function did not exit\n") - if t.success { - os.Exit(1) - } else { - os.Exit(0) - } - } - os.Exit(m.Run()) -} - -// end of infrastructure - -const testfile = "gocapmodetest" -const testfile2 = testfile + "2" - -func CapEnterTest() { - _, err := os.OpenFile(path.Join(procArg, testfile), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) - if err != nil { - panic(fmt.Sprintf("OpenFile: %s", err)) - } - - err = unix.CapEnter() - if err != nil { - panic(fmt.Sprintf("CapEnter: %s", err)) - } - - _, err = os.OpenFile(path.Join(procArg, testfile2), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) - if err == nil { - panic("OpenFile works!") - } - if err.(*os.PathError).Err != unix.ECAPMODE { - panic(fmt.Sprintf("OpenFile failed wrong: %s %#v", err, err)) - } - os.Exit(0) -} - -func makeTempDir(t *testing.T) string { - d, err := ioutil.TempDir("", "go_openat_test") - if err != nil { - t.Fatalf("TempDir failed: %s", err) - } - return d -} - -func removeTempDir(arg string) error { - err := os.RemoveAll(arg) - if err != nil && err.(*os.PathError).Err == unix.ENOENT { - return nil - } - return err -} - -func init() { - testProcs["cap_enter"] = testProc{ - CapEnterTest, - makeTempDir, - removeTempDir, - true, - } -} - -func TestCapEnter(t *testing.T) { - if runtime.GOARCH != "amd64" { - 
t.Skipf("skipping test on %s", runtime.GOARCH) - } - ExitsCorrectly(t, "cap_enter") -} - -func OpenatTest() { - f, err := os.Open(procArg) - if err != nil { - panic(err) - } - - err = unix.CapEnter() - if err != nil { - panic(fmt.Sprintf("CapEnter: %s", err)) - } - - fxx, err := unix.Openat(int(f.Fd()), "xx", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) - if err != nil { - panic(err) - } - unix.Close(fxx) - - // The right to open BASE/xx is not ambient - _, err = os.OpenFile(procArg+"/xx", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) - if err == nil { - panic("OpenFile succeeded") - } - if err.(*os.PathError).Err != unix.ECAPMODE { - panic(fmt.Sprintf("OpenFile failed wrong: %s %#v", err, err)) - } - - // Can't make a new directory either - err = os.Mkdir(procArg+"2", 0777) - if err == nil { - panic("MKdir succeeded") - } - if err.(*os.PathError).Err != unix.ECAPMODE { - panic(fmt.Sprintf("Mkdir failed wrong: %s %#v", err, err)) - } - - // Remove all caps except read and lookup. - r, err := unix.CapRightsInit([]uint64{unix.CAP_READ, unix.CAP_LOOKUP}) - if err != nil { - panic(fmt.Sprintf("CapRightsInit failed: %s %#v", err, err)) - } - err = unix.CapRightsLimit(f.Fd(), r) - if err != nil { - panic(fmt.Sprintf("CapRightsLimit failed: %s %#v", err, err)) - } - - // Check we can get the rights back again - r, err = unix.CapRightsGet(f.Fd()) - if err != nil { - panic(fmt.Sprintf("CapRightsGet failed: %s %#v", err, err)) - } - b, err := unix.CapRightsIsSet(r, []uint64{unix.CAP_READ, unix.CAP_LOOKUP}) - if err != nil { - panic(fmt.Sprintf("CapRightsIsSet failed: %s %#v", err, err)) - } - if !b { - panic(fmt.Sprintf("Unexpected rights")) - } - b, err = unix.CapRightsIsSet(r, []uint64{unix.CAP_READ, unix.CAP_LOOKUP, unix.CAP_WRITE}) - if err != nil { - panic(fmt.Sprintf("CapRightsIsSet failed: %s %#v", err, err)) - } - if b { - panic(fmt.Sprintf("Unexpected rights (2)")) - } - - // Can no longer create a file - _, err = unix.Openat(int(f.Fd()), "xx2", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) - if err == nil { - panic("Openat succeeded") - } - if err != unix.ENOTCAPABLE { - panic(fmt.Sprintf("OpenFileAt failed wrong: %s %#v", err, err)) - } - - // But can read an existing one - _, err = unix.Openat(int(f.Fd()), "xx", os.O_RDONLY, 0666) - if err != nil { - panic(fmt.Sprintf("Openat failed: %s %#v", err, err)) - } - - os.Exit(0) -} - -func init() { - testProcs["openat"] = testProc{ - OpenatTest, - makeTempDir, - removeTempDir, - true, - } -} - -func TestOpenat(t *testing.T) { - if runtime.GOARCH != "amd64" { - t.Skipf("skipping test on %s", runtime.GOARCH) - } - ExitsCorrectly(t, "openat") -} - -func TestCapRightsSetAndClear(t *testing.T) { - r, err := unix.CapRightsInit([]uint64{unix.CAP_READ, unix.CAP_WRITE, unix.CAP_PDWAIT}) - if err != nil { - t.Fatalf("CapRightsInit failed: %s", err) - } - - err = unix.CapRightsSet(r, []uint64{unix.CAP_EVENT, unix.CAP_LISTEN}) - if err != nil { - t.Fatalf("CapRightsSet failed: %s", err) - } - - b, err := unix.CapRightsIsSet(r, []uint64{unix.CAP_READ, unix.CAP_WRITE, unix.CAP_PDWAIT, unix.CAP_EVENT, unix.CAP_LISTEN}) - if err != nil { - t.Fatalf("CapRightsIsSet failed: %s", err) - } - if !b { - t.Fatalf("Wrong rights set") - } - - err = unix.CapRightsClear(r, []uint64{unix.CAP_READ, unix.CAP_PDWAIT}) - if err != nil { - t.Fatalf("CapRightsClear failed: %s", err) - } - - b, err = unix.CapRightsIsSet(r, []uint64{unix.CAP_WRITE, unix.CAP_EVENT, unix.CAP_LISTEN}) - if err != nil { - t.Fatalf("CapRightsIsSet failed: %s", err) - } - if !b { - t.Fatalf("Wrong rights set") 
- } -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 1b7d59d8963..4832bf57308 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -36,59 +36,6 @@ func Creat(path string, mode uint32) (fd int, err error) { return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode) } -//sys fchmodat(dirfd int, path string, mode uint32) (err error) - -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - // Linux fchmodat doesn't support the flags parameter. Mimick glibc's behavior - // and check the flags. Otherwise the mode would be applied to the symlink - // destination which is not what the user expects. - if flags&^AT_SYMLINK_NOFOLLOW != 0 { - return EINVAL - } else if flags&AT_SYMLINK_NOFOLLOW != 0 { - return EOPNOTSUPP - } - return fchmodat(dirfd, path, mode) -} - -//sys ioctl(fd int, req uint, arg uintptr) (err error) - -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func IoctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func IoctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - //sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) func Link(oldpath string, newpath string) (err error) { @@ -352,14 +299,10 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, return } -func Mkfifo(path string, mode uint32) error { +func Mkfifo(path string, mode uint32) (err error) { return Mknod(path, mode|S_IFIFO, 0) } -func Mkfifoat(dirfd int, path string, mode uint32) error { - return Mknodat(dirfd, path, mode|S_IFIFO, 0) -} - func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { if sa.Port < 0 || sa.Port > 0xFFFF { return nil, 0, EINVAL @@ -931,13 +874,8 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from } var dummy byte if len(oob) > 0 { - var sockType int - sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) - if err != nil { - return - } // receive at least one normal byte - if sockType != SOCK_DGRAM && len(p) == 0 { + if len(p) == 0 { iov.Base = &dummy iov.SetLen(1) } @@ -983,13 +921,8 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) } var dummy byte if len(oob) > 0 { - var sockType int - sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) - if err != nil { - return 0, err - } // send at least one normal byte - if sockType != SOCK_DGRAM && len(p) == 0 { + if len(p) == 0 { iov.Base = &dummy 
iov.SetLen(1) } @@ -1219,12 +1152,12 @@ func Mount(source string, target string, fstype string, flags uintptr, data stri //sysnb EpollCreate(size int) (fd int, err error) //sysnb EpollCreate1(flag int) (fd int, err error) //sysnb EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) -//sys Eventfd(initval uint, flags int) (fd int, err error) = SYS_EVENTFD2 //sys Exit(code int) = SYS_EXIT_GROUP //sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fallocate(fd int, mode uint32, off int64, len int64) (err error) //sys Fchdir(fd int) (err error) //sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys fcntl(fd int, cmd int, arg int) (val int, err error) //sys Fdatasync(fd int) (err error) @@ -1251,11 +1184,7 @@ func Getpgrp() (pid int) { //sysnb InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) //sysnb Kill(pid int, sig syscall.Signal) (err error) //sys Klogctl(typ int, buf []byte) (n int, err error) = SYS_SYSLOG -//sys Lgetxattr(path string, attr string, dest []byte) (sz int, err error) //sys Listxattr(path string, dest []byte) (sz int, err error) -//sys Llistxattr(path string, dest []byte) (sz int, err error) -//sys Lremovexattr(path string, attr string) (err error) -//sys Lsetxattr(path string, attr string, data []byte, flags int) (err error) //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) @@ -1289,7 +1218,6 @@ func Setgid(uid int) (err error) { //sys Setpriority(which int, who int, prio int) (err error) //sys Setxattr(path string, attr string, data []byte, flags int) (err error) //sys Sync() -//sys Syncfs(fd int) (err error) //sysnb Sysinfo(info *Sysinfo_t) (err error) //sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error) //sysnb Tgkill(tgid int, tid int, sig syscall.Signal) (err error) @@ -1324,9 +1252,8 @@ func Munmap(b []byte) (err error) { //sys Madvise(b []byte, advice int) (err error) //sys Mprotect(b []byte, prot int) (err error) //sys Mlock(b []byte) (err error) -//sys Mlockall(flags int) (err error) -//sys Msync(b []byte, flags int) (err error) //sys Munlock(b []byte) (err error) +//sys Mlockall(flags int) (err error) //sys Munlockall() (err error) // Vmsplice splices user pages from a slice of Iovecs into a pipe specified by fd, @@ -1366,6 +1293,7 @@ func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) { // EpollCtlOld // EpollPwait // EpollWaitOld +// Eventfd // Execve // Fgetxattr // Flistxattr @@ -1384,16 +1312,22 @@ func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) { // IoGetevents // IoSetup // IoSubmit +// Ioctl // IoprioGet // IoprioSet // KexecLoad +// Lgetxattr +// Llistxattr // LookupDcookie +// Lremovexattr +// Lsetxattr // Mbind // MigratePages // Mincore // ModifyLdt // Mount // MovePages +// Mprotect // MqGetsetattr // MqNotify // MqOpen @@ -1405,6 +1339,7 @@ func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) { // Msgget // Msgrcv // Msgsnd +// Msync // Newfstatat // Nfsservctl // Personality diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index f4c826a4561..2b881b9793b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -14,6 +14,8 @@ 
import ( "unsafe" ) +func Getpagesize() int { return 4096 } + func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } func NsecToTimespec(nsec int64) (ts Timespec) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 0715200dcf4..9516a3fd7ef 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -69,6 +69,8 @@ func Gettimeofday(tv *Timeval) (err error) { return nil } +func Getpagesize() int { return 4096 } + func Time(t *Time_t) (tt Time_t, err error) { var tv Timeval errno := gettimeofday(&tv) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 2b79c84a672..71d87022899 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -11,6 +11,8 @@ import ( "unsafe" ) +func Getpagesize() int { return 4096 } + func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } func NsecToTimespec(nsec int64) (ts Timespec) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 68cc975dbb0..4a136396cdd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -66,6 +66,8 @@ func Lstat(path string, stat *Stat_t) (err error) { //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) +func Getpagesize() int { return 65536 } + //sysnb Gettimeofday(tv *Timeval) (err error) func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 977df441bef..73318e5c643 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -55,6 +55,8 @@ package unix //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) +func Getpagesize() int { return 65536 } + //sysnb Gettimeofday(tv *Timeval) (err error) func Time(t *Time_t) (tt Time_t, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index 25a5a0da5a8..b83d93fdffe 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -235,3 +235,5 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { } return poll(&fds[0], len(fds), timeout) } + +func Getpagesize() int { return 4096 } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 28b7f350d15..60770f627c6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -61,6 +61,8 @@ package unix //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) +func Getpagesize() int { return 65536 } + //sysnb Gettimeofday(tv *Timeval) (err error) //sysnb Time(t *Time_t) (tt Time_t, err error) diff --git 
a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 3845fc9c43a..1708a4bbf9a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -46,6 +46,8 @@ import ( //sysnb getgroups(n int, list *_Gid_t) (nn int, err error) //sysnb setgroups(n int, list *_Gid_t) (err error) +func Getpagesize() int { return 4096 } + //sysnb Gettimeofday(tv *Timeval) (err error) func Time(t *Time_t) (tt Time_t, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index bd9de3e9d05..20b7454d770 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -6,6 +6,11 @@ package unix +import ( + "sync/atomic" + "syscall" +) + //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Dup2(oldfd int, newfd int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) @@ -58,6 +63,21 @@ package unix //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) +func sysconf(name int) (n int64, err syscall.Errno) + +// pageSize caches the value of Getpagesize, since it can't change +// once the system is booted. +var pageSize int64 // accessed atomically + +func Getpagesize() int { + n := atomic.LoadInt64(&pageSize) + if n == 0 { + n, _ = sysconf(_SC_PAGESIZE) + atomic.StoreInt64(&pageSize, n) + } + return int(n) +} + func Ioperm(from int, num int, on int) (err error) { return ENOSYS } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_test.go b/vendor/golang.org/x/sys/unix/syscall_linux_test.go index 37702313073..91184cae069 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_test.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_test.go @@ -15,47 +15,6 @@ import ( "golang.org/x/sys/unix" ) -func TestFchmodat(t *testing.T) { - defer chtmpdir(t)() - - touch(t, "file1") - os.Symlink("file1", "symlink1") - - err := unix.Fchmodat(unix.AT_FDCWD, "symlink1", 0444, 0) - if err != nil { - t.Fatalf("Fchmodat: unexpected error: %v", err) - } - - fi, err := os.Stat("file1") - if err != nil { - t.Fatal(err) - } - - if fi.Mode() != 0444 { - t.Errorf("Fchmodat: failed to change mode: expected %v, got %v", 0444, fi.Mode()) - } - - err = unix.Fchmodat(unix.AT_FDCWD, "symlink1", 0444, unix.AT_SYMLINK_NOFOLLOW) - if err != unix.EOPNOTSUPP { - t.Fatalf("Fchmodat: unexpected error: %v, expected EOPNOTSUPP", err) - } -} - -func TestIoctlGetInt(t *testing.T) { - f, err := os.Open("/dev/random") - if err != nil { - t.Fatalf("failed to open device: %v", err) - } - defer f.Close() - - v, err := unix.IoctlGetInt(int(f.Fd()), unix.RNDGETENTCNT) - if err != nil { - t.Fatalf("failed to perform ioctl: %v", err) - } - - t.Logf("%d bits of entropy available", v) -} - func TestPoll(t *testing.T) { f, cleanup := mktmpfifo(t) defer cleanup() diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index e1296684591..01f6a48c86e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -170,6 +170,11 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Mkdir(path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, 
dev int) (err error) +//sys Mlock(b []byte) (err error) +//sys Mlockall(flags int) (err error) +//sys Mprotect(b []byte, prot int) (err error) +//sys Munlock(b []byte) (err error) +//sys Munlockall() (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) @@ -205,7 +210,6 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys munmap(addr uintptr, length uintptr) (err error) //sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ //sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE -//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) /* * Unimplemented diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go index baefa411ec2..afaca09838a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -6,6 +6,8 @@ package unix +func Getpagesize() int { return 4096 } + func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } func NsecToTimespec(nsec int64) (ts Timespec) { diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go index 59c2ab7ebaf..a6ff04ce5bd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -6,6 +6,8 @@ package unix +func Getpagesize() int { return 4096 } + func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } func NsecToTimespec(nsec int64) (ts Timespec) { diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go index 7208108a31a..68a6969b285 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -6,6 +6,8 @@ package unix +func Getpagesize() int { return 4096 } + func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } func NsecToTimespec(nsec int64) (ts Timespec) { diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 408e63081c0..c0d2b6c80dc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -149,6 +149,11 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { //sys Mkdir(path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mlock(b []byte) (err error) +//sys Mlockall(flags int) (err error) +//sys Mprotect(b []byte, prot int) (err error) +//sys Munlock(b []byte) (err error) +//sys Munlockall() (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) @@ -188,7 +193,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { //sys munmap(addr uintptr, length uintptr) (err error) //sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ //sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE -//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) /* * Unimplemented @@ -278,5 +282,6 @@ func Getfsstat(buf 
[]Statfs_t, flags int) (n int, err error) { // thrsleep // thrwakeup // unlinkat +// utimensat // vfork // writev diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index d3809b426cb..a66ddc59ce9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -6,6 +6,8 @@ package unix +func Getpagesize() int { return 4096 } + func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } func NsecToTimespec(nsec int64) (ts Timespec) { diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index 9a9dfceffd8..0776c1faf98 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -6,6 +6,8 @@ package unix +func Getpagesize() int { return 4096 } + func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } func NsecToTimespec(nsec int64) (ts Timespec) { diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go deleted file mode 100644 index ba8649056fb..00000000000 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build arm,openbsd - -package unix - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int64(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return -} - -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int64(nsec / 1e9) - return -} - -func SetKevent(k *Kevent_t, fd, mode, flags int) { - k.Ident = uint32(fd) - k.Filter = int16(mode) - k.Flags = uint16(flags) -} - -func (iov *Iovec) SetLen(length int) { - iov.Len = uint32(length) -} - -func (msghdr *Msghdr) SetControllen(length int) { - msghdr.Controllen = uint32(length) -} - -func (cmsg *Cmsghdr) SetLen(length int) { - cmsg.Len = uint32(length) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 35e5d72baf3..cab9b4fba94 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -13,6 +13,7 @@ package unix import ( + "sync/atomic" "syscall" "unsafe" ) @@ -514,65 +515,47 @@ func Acct(path string) (err error) { return acct(pathp) } -//sys __makedev(version int, major uint, minor uint) (val uint64) - -func Mkdev(major, minor uint32) uint64 { - return __makedev(NEWDEV, uint(major), uint(minor)) -} - -//sys __major(version int, dev uint64) (val uint) - -func Major(dev uint64) uint32 { - return uint32(__major(NEWDEV, dev)) -} - -//sys __minor(version int, dev uint64) (val uint) - -func Minor(dev uint64) uint32 { - return uint32(__minor(NEWDEV, dev)) -} - /* * Expose the ioctl function */ -//sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctl(fd int, req int, arg uintptr) (err error) -func IoctlSetInt(fd int, req uint, value int) (err error) { +func IoctlSetInt(fd int, req int, value int) (err error) { return ioctl(fd, req, uintptr(value)) } -func IoctlSetWinsize(fd int, req uint, value *Winsize) (err error) { +func IoctlSetWinsize(fd int, req int, value 
*Winsize) (err error) { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } -func IoctlSetTermios(fd int, req uint, value *Termios) (err error) { +func IoctlSetTermios(fd int, req int, value *Termios) (err error) { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } -func IoctlSetTermio(fd int, req uint, value *Termio) (err error) { +func IoctlSetTermio(fd int, req int, value *Termio) (err error) { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } -func IoctlGetInt(fd int, req uint) (int, error) { +func IoctlGetInt(fd int, req int) (int, error) { var value int err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) return value, err } -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { +func IoctlGetWinsize(fd int, req int) (*Winsize, error) { var value Winsize err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) return &value, err } -func IoctlGetTermios(fd int, req uint) (*Termios, error) { +func IoctlGetTermios(fd int, req int) (*Termios, error) { var value Termios err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) return &value, err } -func IoctlGetTermio(fd int, req uint) (*Termio, error) { +func IoctlGetTermio(fd int, req int) (*Termio, error) { var value Termio err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) return &value, err @@ -598,7 +581,6 @@ func IoctlGetTermio(fd int, req uint) (*Termio, error) { //sys Fchown(fd int, uid int, gid int) (err error) //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Fdatasync(fd int) (err error) -//sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatvfs(fd int, vfsstat *Statvfs_t) (err error) @@ -630,7 +612,6 @@ func IoctlGetTermio(fd int, req uint) (*Termio, error) { //sys Mlock(b []byte) (err error) //sys Mlockall(flags int) (err error) //sys Mprotect(b []byte, prot int) (err error) -//sys Msync(b []byte, flags int) (err error) //sys Munlock(b []byte) (err error) //sys Munlockall() (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) @@ -717,3 +698,18 @@ func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, e func Munmap(b []byte) (err error) { return mapper.Munmap(b) } + +//sys sysconf(name int) (n int64, err error) + +// pageSize caches the value of Getpagesize, since it can't change +// once the system is booted. +var pageSize int64 // accessed atomically + +func Getpagesize() int { + n := atomic.LoadInt64(&pageSize) + if n == 0 { + n, _ = sysconf(_SC_PAGESIZE) + atomic.StoreInt64(&pageSize, n) + } + return int(n) +} diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go index 415124d4a1a..a3508174e41 100644 --- a/vendor/golang.org/x/sys/unix/types_darwin.go +++ b/vendor/golang.org/x/sys/unix/types_darwin.go @@ -242,13 +242,9 @@ type BpfHdr C.struct_bpf_hdr type Termios C.struct_termios -type Winsize C.struct_winsize - // fchmodat-like syscalls. const ( AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW ) diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go index 80b27781d1e..a818704eb1b 100644 --- a/vendor/golang.org/x/sys/unix/types_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/types_dragonfly.go @@ -240,10 +240,3 @@ type BpfHdr C.struct_bpf_hdr // Terminal handling type Termios C.struct_termios - -// fchmodat-like syscalls. 
- -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go index 934fd7ace22..972e69a07c6 100644 --- a/vendor/golang.org/x/sys/unix/types_freebsd.go +++ b/vendor/golang.org/x/sys/unix/types_freebsd.go @@ -21,7 +21,6 @@ package unix #include #include #include -#include #include #include #include @@ -131,10 +130,7 @@ struct if_data8 { u_long ifi_iqdrops; u_long ifi_noproto; u_long ifi_hwassist; -// FIXME: these are now unions, so maybe need to change definitions? -#undef ifi_epoch time_t ifi_epoch; -#undef ifi_lastchange struct timeval ifi_lastchange; }; @@ -355,18 +351,3 @@ type BpfZbufHeader C.struct_bpf_zbuf_header // Terminal handling type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// Capabilities - -type CapRights C.struct_cap_rights diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go index cb95c80aabc..7cfdb9cd4b7 100644 --- a/vendor/golang.org/x/sys/unix/types_netbsd.go +++ b/vendor/golang.org/x/sys/unix/types_netbsd.go @@ -227,13 +227,6 @@ type BpfTimeval C.struct_bpf_timeval type Termios C.struct_termios -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - // Sysctl type Sysctlnode C.struct_sysctlnode diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go index 392da69bf0a..6c7c2279582 100644 --- a/vendor/golang.org/x/sys/unix/types_openbsd.go +++ b/vendor/golang.org/x/sys/unix/types_openbsd.go @@ -242,10 +242,3 @@ type BpfTimeval C.struct_bpf_timeval // Terminal handling type Termios C.struct_termios - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go index 6d7461401c2..393c7f04fb6 100644 --- a/vendor/golang.org/x/sys/unix/types_solaris.go +++ b/vendor/golang.org/x/sys/unix/types_solaris.go @@ -256,6 +256,10 @@ type BpfTimeval C.struct_bpf_timeval type BpfHdr C.struct_bpf_hdr +// sysconf information + +const _SC_PAGESIZE = C._SC_PAGESIZE + // Terminal handling type Termios C.struct_termios diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go index 1c68758b647..8e63888351e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go @@ -1,5 +1,5 @@ // mkerrors.sh -m32 -// Code generated by the command above; see README.md. DO NOT EDIT. 
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build 386,darwin @@ -48,7 +48,6 @@ const ( AF_UNIX = 0x1 AF_UNSPEC = 0x0 AF_UTUN = 0x26 - ALTWERASE = 0x200 B0 = 0x0 B110 = 0x6e B115200 = 0x1c200 @@ -139,26 +138,9 @@ const ( BPF_W = 0x0 BPF_X = 0x8 BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x8000 - BSDLY = 0x8000 CFLUSH = 0xf CLOCAL = 0x8000 - CLOCK_MONOTONIC = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_MONOTONIC_RAW_APPROX = 0x5 - CLOCK_PROCESS_CPUTIME_ID = 0xc - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x10 - CLOCK_UPTIME_RAW = 0x8 - CLOCK_UPTIME_RAW_APPROX = 0x9 - CR0 = 0x0 - CR1 = 0x1000 - CR2 = 0x2000 - CR3 = 0x3000 - CRDLY = 0x3000 CREAD = 0x800 - CRTSCTS = 0x30000 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 @@ -350,14 +332,13 @@ const ( ECHONL = 0x10 ECHOPRT = 0x20 EVFILT_AIO = -0x3 - EVFILT_EXCEPT = -0xf EVFILT_FS = -0x9 EVFILT_MACHPORT = -0x8 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xf - EVFILT_THREADMARKER = 0xf + EVFILT_SYSCOUNT = 0xe + EVFILT_THREADMARKER = 0xe EVFILT_TIMER = -0x7 EVFILT_USER = -0xa EVFILT_VM = -0xc @@ -368,7 +349,6 @@ const ( EV_DELETE = 0x2 EV_DISABLE = 0x8 EV_DISPATCH = 0x80 - EV_DISPATCH2 = 0x180 EV_ENABLE = 0x4 EV_EOF = 0x8000 EV_ERROR = 0x4000 @@ -379,25 +359,16 @@ const ( EV_POLL = 0x1000 EV_RECEIPT = 0x40 EV_SYSFLAGS = 0xf000 - EV_UDATA_SPECIFIC = 0x100 - EV_VANISHED = 0x200 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x4000 - FFDLY = 0x4000 FLUSHO = 0x800000 F_ADDFILESIGS = 0x3d - F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 - F_ADDFILESIGS_RETURN = 0x61 F_ADDSIGS = 0x3b F_ALLOCATEALL = 0x4 F_ALLOCATECONTIG = 0x2 - F_BARRIERFSYNC = 0x55 - F_CHECK_LV = 0x62 F_CHKCLEAN = 0x29 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x43 @@ -799,13 +770,11 @@ const ( MADV_FREE_REUSABLE = 0x7 MADV_FREE_REUSE = 0x8 MADV_NORMAL = 0x0 - MADV_PAGEOUT = 0xa MADV_RANDOM = 0x1 MADV_SEQUENTIAL = 0x2 MADV_WILLNEED = 0x3 MADV_ZERO_WIRED_PAGES = 0x6 MAP_ANON = 0x1000 - MAP_ANONYMOUS = 0x1000 MAP_COPY = 0x2 MAP_FILE = 0x0 MAP_FIXED = 0x10 @@ -817,43 +786,9 @@ const ( MAP_PRIVATE = 0x2 MAP_RENAME = 0x20 MAP_RESERVED0080 = 0x80 - MAP_RESILIENT_CODESIGN = 0x2000 - MAP_RESILIENT_MEDIA = 0x4000 MAP_SHARED = 0x1 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 - MNT_ASYNC = 0x40 - MNT_AUTOMOUNTED = 0x400000 - MNT_CMDFLAGS = 0xf0000 - MNT_CPROTECT = 0x80 - MNT_DEFWRITE = 0x2000000 - MNT_DONTBROWSE = 0x100000 - MNT_DOVOLFS = 0x8000 - MNT_DWAIT = 0x4 - MNT_EXPORTED = 0x100 - MNT_FORCE = 0x80000 - MNT_IGNORE_OWNERSHIP = 0x200000 - MNT_JOURNALED = 0x800000 - MNT_LOCAL = 0x1000 - MNT_MULTILABEL = 0x4000000 - MNT_NOATIME = 0x10000000 - MNT_NOBLOCK = 0x20000 - MNT_NODEV = 0x10 - MNT_NOEXEC = 0x4 - MNT_NOSUID = 0x8 - MNT_NOUSERXATTR = 0x1000000 - MNT_NOWAIT = 0x2 - MNT_QUARANTINE = 0x400 - MNT_QUOTA = 0x2000 - MNT_RDONLY = 0x1 - MNT_RELOAD = 0x40000 - MNT_ROOTFS = 0x4000 - MNT_SYNCHRONOUS = 0x2 - MNT_UNION = 0x20 - MNT_UNKNOWNPERMISSIONS = 0x200000 - MNT_UPDATE = 0x10000 - MNT_VISFLAGMASK = 0x17f0f5ff - MNT_WAIT = 0x1 MSG_CTRUNC = 0x20 MSG_DONTROUTE = 0x4 MSG_DONTWAIT = 0x80 @@ -884,13 +819,7 @@ const ( NET_RT_MAXID = 0xa NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 - NL0 = 0x0 - NL1 = 0x100 - NL2 = 0x200 - NL3 = 0x300 - NLDLY = 0x300 NOFLSH = 0x80000000 - NOKERNINFO = 0x2000000 NOTE_ABSOLUTE = 0x8 NOTE_ATTRIB = 0x8 NOTE_BACKGROUND = 0x40 @@ -914,14 +843,11 @@ const ( NOTE_FFNOP = 0x0 NOTE_FFOR = 0x80000000 NOTE_FORK = 0x40000000 - NOTE_FUNLOCK = 0x100 NOTE_LEEWAY = 0x10 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 - NOTE_MACH_CONTINUOUS_TIME = 0x80 NOTE_NONE 
= 0x80 NOTE_NSECONDS = 0x4 - NOTE_OOB = 0x2 NOTE_PCTRLMASK = -0x100000 NOTE_PDATAMASK = 0xfffff NOTE_REAP = 0x10000000 @@ -946,7 +872,6 @@ const ( ONOCR = 0x20 ONOEOT = 0x8 OPOST = 0x1 - OXTABS = 0x4 O_ACCMODE = 0x3 O_ALERT = 0x20000000 O_APPEND = 0x8 @@ -955,7 +880,6 @@ const ( O_CREAT = 0x200 O_DIRECTORY = 0x100000 O_DP_GETRAWENCRYPTED = 0x1 - O_DP_GETRAWUNENCRYPTED = 0x2 O_DSYNC = 0x400000 O_EVTONLY = 0x8000 O_EXCL = 0x800 @@ -1008,10 +932,7 @@ const ( RLIMIT_CPU_USAGE_MONITOR = 0x2 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 - RLIMIT_MEMLOCK = 0x6 RLIMIT_NOFILE = 0x8 - RLIMIT_NPROC = 0x7 - RLIMIT_RSS = 0x5 RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 @@ -1181,8 +1102,6 @@ const ( SO_LABEL = 0x1010 SO_LINGER = 0x80 SO_LINGER_SEC = 0x1080 - SO_NETSVC_MARKING_LEVEL = 0x1119 - SO_NET_SERVICE_TYPE = 0x1116 SO_NKE = 0x1021 SO_NOADDRERR = 0x1023 SO_NOSIGPIPE = 0x1022 @@ -1238,22 +1157,11 @@ const ( S_IXGRP = 0x8 S_IXOTH = 0x1 S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x400 - TAB2 = 0x800 - TAB3 = 0x4 - TABDLY = 0xc04 TCIFLUSH = 0x1 - TCIOFF = 0x3 TCIOFLUSH = 0x3 - TCION = 0x4 TCOFLUSH = 0x2 - TCOOFF = 0x1 - TCOON = 0x2 TCP_CONNECTIONTIMEOUT = 0x20 - TCP_CONNECTION_INFO = 0x106 TCP_ENABLE_ECN = 0x104 - TCP_FASTOPEN = 0x105 TCP_KEEPALIVE = 0x10 TCP_KEEPCNT = 0x102 TCP_KEEPINTVL = 0x101 @@ -1353,11 +1261,6 @@ const ( VKILL = 0x5 VLNEXT = 0xe VMIN = 0x10 - VM_LOADAVG = 0x2 - VM_MACHFACTOR = 0x4 - VM_MAXID = 0x6 - VM_METER = 0x1 - VM_SWAPUSAGE = 0x5 VQUIT = 0x9 VREPRINT = 0x6 VSTART = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 48f63d4f0c2..9594f93817a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -1,5 +1,5 @@ // mkerrors.sh -m64 -// Code generated by the command above; see README.md. DO NOT EDIT. 
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build amd64,darwin @@ -48,7 +48,6 @@ const ( AF_UNIX = 0x1 AF_UNSPEC = 0x0 AF_UTUN = 0x26 - ALTWERASE = 0x200 B0 = 0x0 B110 = 0x6e B115200 = 0x1c200 @@ -139,26 +138,9 @@ const ( BPF_W = 0x0 BPF_X = 0x8 BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x8000 - BSDLY = 0x8000 CFLUSH = 0xf CLOCAL = 0x8000 - CLOCK_MONOTONIC = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_MONOTONIC_RAW_APPROX = 0x5 - CLOCK_PROCESS_CPUTIME_ID = 0xc - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x10 - CLOCK_UPTIME_RAW = 0x8 - CLOCK_UPTIME_RAW_APPROX = 0x9 - CR0 = 0x0 - CR1 = 0x1000 - CR2 = 0x2000 - CR3 = 0x3000 - CRDLY = 0x3000 CREAD = 0x800 - CRTSCTS = 0x30000 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 @@ -350,14 +332,13 @@ const ( ECHONL = 0x10 ECHOPRT = 0x20 EVFILT_AIO = -0x3 - EVFILT_EXCEPT = -0xf EVFILT_FS = -0x9 EVFILT_MACHPORT = -0x8 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xf - EVFILT_THREADMARKER = 0xf + EVFILT_SYSCOUNT = 0xe + EVFILT_THREADMARKER = 0xe EVFILT_TIMER = -0x7 EVFILT_USER = -0xa EVFILT_VM = -0xc @@ -368,7 +349,6 @@ const ( EV_DELETE = 0x2 EV_DISABLE = 0x8 EV_DISPATCH = 0x80 - EV_DISPATCH2 = 0x180 EV_ENABLE = 0x4 EV_EOF = 0x8000 EV_ERROR = 0x4000 @@ -379,25 +359,16 @@ const ( EV_POLL = 0x1000 EV_RECEIPT = 0x40 EV_SYSFLAGS = 0xf000 - EV_UDATA_SPECIFIC = 0x100 - EV_VANISHED = 0x200 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x4000 - FFDLY = 0x4000 FLUSHO = 0x800000 F_ADDFILESIGS = 0x3d - F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 - F_ADDFILESIGS_RETURN = 0x61 F_ADDSIGS = 0x3b F_ALLOCATEALL = 0x4 F_ALLOCATECONTIG = 0x2 - F_BARRIERFSYNC = 0x55 - F_CHECK_LV = 0x62 F_CHKCLEAN = 0x29 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x43 @@ -799,13 +770,11 @@ const ( MADV_FREE_REUSABLE = 0x7 MADV_FREE_REUSE = 0x8 MADV_NORMAL = 0x0 - MADV_PAGEOUT = 0xa MADV_RANDOM = 0x1 MADV_SEQUENTIAL = 0x2 MADV_WILLNEED = 0x3 MADV_ZERO_WIRED_PAGES = 0x6 MAP_ANON = 0x1000 - MAP_ANONYMOUS = 0x1000 MAP_COPY = 0x2 MAP_FILE = 0x0 MAP_FIXED = 0x10 @@ -817,43 +786,9 @@ const ( MAP_PRIVATE = 0x2 MAP_RENAME = 0x20 MAP_RESERVED0080 = 0x80 - MAP_RESILIENT_CODESIGN = 0x2000 - MAP_RESILIENT_MEDIA = 0x4000 MAP_SHARED = 0x1 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 - MNT_ASYNC = 0x40 - MNT_AUTOMOUNTED = 0x400000 - MNT_CMDFLAGS = 0xf0000 - MNT_CPROTECT = 0x80 - MNT_DEFWRITE = 0x2000000 - MNT_DONTBROWSE = 0x100000 - MNT_DOVOLFS = 0x8000 - MNT_DWAIT = 0x4 - MNT_EXPORTED = 0x100 - MNT_FORCE = 0x80000 - MNT_IGNORE_OWNERSHIP = 0x200000 - MNT_JOURNALED = 0x800000 - MNT_LOCAL = 0x1000 - MNT_MULTILABEL = 0x4000000 - MNT_NOATIME = 0x10000000 - MNT_NOBLOCK = 0x20000 - MNT_NODEV = 0x10 - MNT_NOEXEC = 0x4 - MNT_NOSUID = 0x8 - MNT_NOUSERXATTR = 0x1000000 - MNT_NOWAIT = 0x2 - MNT_QUARANTINE = 0x400 - MNT_QUOTA = 0x2000 - MNT_RDONLY = 0x1 - MNT_RELOAD = 0x40000 - MNT_ROOTFS = 0x4000 - MNT_SYNCHRONOUS = 0x2 - MNT_UNION = 0x20 - MNT_UNKNOWNPERMISSIONS = 0x200000 - MNT_UPDATE = 0x10000 - MNT_VISFLAGMASK = 0x17f0f5ff - MNT_WAIT = 0x1 MSG_CTRUNC = 0x20 MSG_DONTROUTE = 0x4 MSG_DONTWAIT = 0x80 @@ -884,13 +819,7 @@ const ( NET_RT_MAXID = 0xa NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 - NL0 = 0x0 - NL1 = 0x100 - NL2 = 0x200 - NL3 = 0x300 - NLDLY = 0x300 NOFLSH = 0x80000000 - NOKERNINFO = 0x2000000 NOTE_ABSOLUTE = 0x8 NOTE_ATTRIB = 0x8 NOTE_BACKGROUND = 0x40 @@ -914,14 +843,11 @@ const ( NOTE_FFNOP = 0x0 NOTE_FFOR = 0x80000000 NOTE_FORK = 0x40000000 - NOTE_FUNLOCK = 0x100 NOTE_LEEWAY = 0x10 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 - NOTE_MACH_CONTINUOUS_TIME = 0x80 
NOTE_NONE = 0x80 NOTE_NSECONDS = 0x4 - NOTE_OOB = 0x2 NOTE_PCTRLMASK = -0x100000 NOTE_PDATAMASK = 0xfffff NOTE_REAP = 0x10000000 @@ -946,7 +872,6 @@ const ( ONOCR = 0x20 ONOEOT = 0x8 OPOST = 0x1 - OXTABS = 0x4 O_ACCMODE = 0x3 O_ALERT = 0x20000000 O_APPEND = 0x8 @@ -955,7 +880,6 @@ const ( O_CREAT = 0x200 O_DIRECTORY = 0x100000 O_DP_GETRAWENCRYPTED = 0x1 - O_DP_GETRAWUNENCRYPTED = 0x2 O_DSYNC = 0x400000 O_EVTONLY = 0x8000 O_EXCL = 0x800 @@ -1008,10 +932,7 @@ const ( RLIMIT_CPU_USAGE_MONITOR = 0x2 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 - RLIMIT_MEMLOCK = 0x6 RLIMIT_NOFILE = 0x8 - RLIMIT_NPROC = 0x7 - RLIMIT_RSS = 0x5 RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 @@ -1181,8 +1102,6 @@ const ( SO_LABEL = 0x1010 SO_LINGER = 0x80 SO_LINGER_SEC = 0x1080 - SO_NETSVC_MARKING_LEVEL = 0x1119 - SO_NET_SERVICE_TYPE = 0x1116 SO_NKE = 0x1021 SO_NOADDRERR = 0x1023 SO_NOSIGPIPE = 0x1022 @@ -1238,22 +1157,11 @@ const ( S_IXGRP = 0x8 S_IXOTH = 0x1 S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x400 - TAB2 = 0x800 - TAB3 = 0x4 - TABDLY = 0xc04 TCIFLUSH = 0x1 - TCIOFF = 0x3 TCIOFLUSH = 0x3 - TCION = 0x4 TCOFLUSH = 0x2 - TCOOFF = 0x1 - TCOON = 0x2 TCP_CONNECTIONTIMEOUT = 0x20 - TCP_CONNECTION_INFO = 0x106 TCP_ENABLE_ECN = 0x104 - TCP_FASTOPEN = 0x105 TCP_KEEPALIVE = 0x10 TCP_KEEPCNT = 0x102 TCP_KEEPINTVL = 0x101 @@ -1353,11 +1261,6 @@ const ( VKILL = 0x5 VLNEXT = 0xe VMIN = 0x10 - VM_LOADAVG = 0x2 - VM_MACHFACTOR = 0x4 - VM_MAXID = 0x6 - VM_METER = 0x1 - VM_SWAPUSAGE = 0x5 VQUIT = 0x9 VREPRINT = 0x6 VSTART = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go index 24cb522d9b5..a410e88edde 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go @@ -1,11 +1,11 @@ // mkerrors.sh -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build arm,darwin +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // Created by cgo -godefs - DO NOT EDIT // cgo -godefs -- _const.go +// +build arm,darwin + package unix import "syscall" @@ -48,7 +48,6 @@ const ( AF_UNIX = 0x1 AF_UNSPEC = 0x0 AF_UTUN = 0x26 - ALTWERASE = 0x200 B0 = 0x0 B110 = 0x6e B115200 = 0x1c200 @@ -87,7 +86,6 @@ const ( BIOCSBLEN = 0xc0044266 BIOCSDLT = 0x80044278 BIOCSETF = 0x80104267 - BIOCSETFNR = 0x8010427e BIOCSETIF = 0x8020426c BIOCSHDRCMPLT = 0x80044275 BIOCSRSIG = 0x80044273 @@ -139,26 +137,9 @@ const ( BPF_W = 0x0 BPF_X = 0x8 BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x8000 - BSDLY = 0x8000 CFLUSH = 0xf CLOCAL = 0x8000 - CLOCK_MONOTONIC = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_MONOTONIC_RAW_APPROX = 0x5 - CLOCK_PROCESS_CPUTIME_ID = 0xc - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x10 - CLOCK_UPTIME_RAW = 0x8 - CLOCK_UPTIME_RAW_APPROX = 0x9 - CR0 = 0x0 - CR1 = 0x1000 - CR2 = 0x2000 - CR3 = 0x3000 - CRDLY = 0x3000 CREAD = 0x800 - CRTSCTS = 0x30000 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 @@ -171,168 +152,33 @@ const ( CSUSP = 0x1a CTL_MAXNAME = 0xc CTL_NET = 0x4 - DLT_A429 = 0xb8 - DLT_A653_ICM = 0xb9 - DLT_AIRONET_HEADER = 0x78 - DLT_AOS = 0xde DLT_APPLE_IP_OVER_IEEE1394 = 0x8a DLT_ARCNET = 0x7 - DLT_ARCNET_LINUX = 0x81 DLT_ATM_CLIP = 0x13 DLT_ATM_RFC1483 = 0xb - DLT_AURORA = 0x7e DLT_AX25 = 0x3 - DLT_AX25_KISS = 0xca - DLT_BACNET_MS_TP = 0xa5 - DLT_BLUETOOTH_HCI_H4 = 0xbb - DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 - DLT_CAN20B = 0xbe - DLT_CAN_SOCKETCAN = 0xe3 DLT_CHAOS = 0x5 DLT_CHDLC = 0x68 - DLT_CISCO_IOS = 0x76 DLT_C_HDLC = 0x68 - DLT_C_HDLC_WITH_DIR = 0xcd - DLT_DBUS = 0xe7 - DLT_DECT = 0xdd - DLT_DOCSIS = 0x8f - DLT_DVB_CI = 0xeb - DLT_ECONET = 0x73 DLT_EN10MB = 0x1 DLT_EN3MB = 0x2 - DLT_ENC = 0x6d - DLT_ERF = 0xc5 - DLT_ERF_ETH = 0xaf - DLT_ERF_POS = 0xb0 - DLT_FC_2 = 0xe0 - DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 DLT_FDDI = 0xa - DLT_FLEXRAY = 0xd2 - DLT_FRELAY = 0x6b - DLT_FRELAY_WITH_DIR = 0xce - DLT_GCOM_SERIAL = 0xad - DLT_GCOM_T1E1 = 0xac - DLT_GPF_F = 0xab - DLT_GPF_T = 0xaa - DLT_GPRS_LLC = 0xa9 - DLT_GSMTAP_ABIS = 0xda - DLT_GSMTAP_UM = 0xd9 - DLT_HHDLC = 0x79 - DLT_IBM_SN = 0x92 - DLT_IBM_SP = 0x91 DLT_IEEE802 = 0x6 DLT_IEEE802_11 = 0x69 DLT_IEEE802_11_RADIO = 0x7f DLT_IEEE802_11_RADIO_AVS = 0xa3 - DLT_IEEE802_15_4 = 0xc3 - DLT_IEEE802_15_4_LINUX = 0xbf - DLT_IEEE802_15_4_NOFCS = 0xe6 - DLT_IEEE802_15_4_NONASK_PHY = 0xd7 - DLT_IEEE802_16_MAC_CPS = 0xbc - DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 - DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 - DLT_IPMB_LINUX = 0xd1 - DLT_IPNET = 0xe2 - DLT_IPOIB = 0xf2 - DLT_IPV4 = 0xe4 - DLT_IPV6 = 0xe5 - DLT_IP_OVER_FC = 0x7a - DLT_JUNIPER_ATM1 = 0x89 - DLT_JUNIPER_ATM2 = 0x87 - DLT_JUNIPER_ATM_CEMIC = 0xee - DLT_JUNIPER_CHDLC = 0xb5 - DLT_JUNIPER_ES = 0x84 - DLT_JUNIPER_ETHER = 0xb2 - DLT_JUNIPER_FIBRECHANNEL = 0xea - DLT_JUNIPER_FRELAY = 0xb4 - DLT_JUNIPER_GGSN = 0x85 - DLT_JUNIPER_ISM = 0xc2 - DLT_JUNIPER_MFR = 0x86 - DLT_JUNIPER_MLFR = 0x83 - DLT_JUNIPER_MLPPP = 0x82 - DLT_JUNIPER_MONITOR = 0xa4 - DLT_JUNIPER_PIC_PEER = 0xae - DLT_JUNIPER_PPP = 0xb3 - DLT_JUNIPER_PPPOE = 0xa7 - DLT_JUNIPER_PPPOE_ATM = 0xa8 - DLT_JUNIPER_SERVICES = 0x88 - DLT_JUNIPER_SRX_E2E = 0xe9 - DLT_JUNIPER_ST = 0xc8 - DLT_JUNIPER_VP = 0xb7 - DLT_JUNIPER_VS = 0xe8 - DLT_LAPB_WITH_DIR = 0xcf - DLT_LAPD = 0xcb - DLT_LIN = 0xd4 - DLT_LINUX_EVDEV = 0xd8 - DLT_LINUX_IRDA = 0x90 - DLT_LINUX_LAPD = 0xb1 - DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 DLT_LOOP = 0x6c - DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0xf5 - DLT_MATCHING_MIN = 0x68 - DLT_MFR 
= 0xb6 - DLT_MOST = 0xd3 - DLT_MPEG_2_TS = 0xf3 - DLT_MPLS = 0xdb - DLT_MTP2 = 0x8c - DLT_MTP2_WITH_PHDR = 0x8b - DLT_MTP3 = 0x8d - DLT_MUX27010 = 0xec - DLT_NETANALYZER = 0xf0 - DLT_NETANALYZER_TRANSPARENT = 0xf1 - DLT_NFC_LLCP = 0xf5 - DLT_NFLOG = 0xef - DLT_NG40 = 0xf4 DLT_NULL = 0x0 - DLT_PCI_EXP = 0x7d DLT_PFLOG = 0x75 DLT_PFSYNC = 0x12 - DLT_PPI = 0xc0 DLT_PPP = 0x9 DLT_PPP_BSDOS = 0x10 - DLT_PPP_ETHER = 0x33 - DLT_PPP_PPPD = 0xa6 DLT_PPP_SERIAL = 0x32 - DLT_PPP_WITH_DIR = 0xcc - DLT_PPP_WITH_DIRECTION = 0xa6 - DLT_PRISM_HEADER = 0x77 DLT_PRONET = 0x4 - DLT_RAIF1 = 0xc6 DLT_RAW = 0xc - DLT_RIO = 0x7c - DLT_SCCP = 0x8e - DLT_SITA = 0xc4 DLT_SLIP = 0x8 DLT_SLIP_BSDOS = 0xf - DLT_STANAG_5066_D_PDU = 0xed - DLT_SUNATM = 0x7b - DLT_SYMANTEC_FIREWALL = 0x63 - DLT_TZSP = 0x80 - DLT_USB = 0xba - DLT_USB_LINUX = 0xbd - DLT_USB_LINUX_MMAPPED = 0xdc - DLT_USER0 = 0x93 - DLT_USER1 = 0x94 - DLT_USER10 = 0x9d - DLT_USER11 = 0x9e - DLT_USER12 = 0x9f - DLT_USER13 = 0xa0 - DLT_USER14 = 0xa1 - DLT_USER15 = 0xa2 - DLT_USER2 = 0x95 - DLT_USER3 = 0x96 - DLT_USER4 = 0x97 - DLT_USER5 = 0x98 - DLT_USER6 = 0x99 - DLT_USER7 = 0x9a - DLT_USER8 = 0x9b - DLT_USER9 = 0x9c - DLT_WIHART = 0xdf - DLT_X2E_SERIAL = 0xd5 - DLT_X2E_XORAYA = 0xd6 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -350,14 +196,13 @@ const ( ECHONL = 0x10 ECHOPRT = 0x20 EVFILT_AIO = -0x3 - EVFILT_EXCEPT = -0xf EVFILT_FS = -0x9 EVFILT_MACHPORT = -0x8 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xf - EVFILT_THREADMARKER = 0xf + EVFILT_SYSCOUNT = 0xe + EVFILT_THREADMARKER = 0xe EVFILT_TIMER = -0x7 EVFILT_USER = -0xa EVFILT_VM = -0xc @@ -368,7 +213,6 @@ const ( EV_DELETE = 0x2 EV_DISABLE = 0x8 EV_DISPATCH = 0x80 - EV_DISPATCH2 = 0x180 EV_ENABLE = 0x4 EV_EOF = 0x8000 EV_ERROR = 0x4000 @@ -379,25 +223,16 @@ const ( EV_POLL = 0x1000 EV_RECEIPT = 0x40 EV_SYSFLAGS = 0xf000 - EV_UDATA_SPECIFIC = 0x100 - EV_VANISHED = 0x200 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x4000 - FFDLY = 0x4000 FLUSHO = 0x800000 F_ADDFILESIGS = 0x3d - F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 - F_ADDFILESIGS_RETURN = 0x61 F_ADDSIGS = 0x3b F_ALLOCATEALL = 0x4 F_ALLOCATECONTIG = 0x2 - F_BARRIERFSYNC = 0x55 - F_CHECK_LV = 0x62 F_CHKCLEAN = 0x29 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x43 @@ -512,7 +347,6 @@ const ( IFT_PDP = 0xff IFT_PFLOG = 0xf5 IFT_PFSYNC = 0xf6 - IFT_PKTAP = 0xfe IFT_PPP = 0x17 IFT_PROPMUX = 0x36 IFT_PROPVIRTUAL = 0x35 @@ -681,7 +515,7 @@ const ( IPV6_FAITH = 0x1d IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 - IPV6_FRAGTTL = 0x3c + IPV6_FRAGTTL = 0x78 IPV6_FW_ADD = 0x1e IPV6_FW_DEL = 0x1f IPV6_FW_FLUSH = 0x20 @@ -799,13 +633,11 @@ const ( MADV_FREE_REUSABLE = 0x7 MADV_FREE_REUSE = 0x8 MADV_NORMAL = 0x0 - MADV_PAGEOUT = 0xa MADV_RANDOM = 0x1 MADV_SEQUENTIAL = 0x2 MADV_WILLNEED = 0x3 MADV_ZERO_WIRED_PAGES = 0x6 MAP_ANON = 0x1000 - MAP_ANONYMOUS = 0x1000 MAP_COPY = 0x2 MAP_FILE = 0x0 MAP_FIXED = 0x10 @@ -817,43 +649,9 @@ const ( MAP_PRIVATE = 0x2 MAP_RENAME = 0x20 MAP_RESERVED0080 = 0x80 - MAP_RESILIENT_CODESIGN = 0x2000 - MAP_RESILIENT_MEDIA = 0x4000 MAP_SHARED = 0x1 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 - MNT_ASYNC = 0x40 - MNT_AUTOMOUNTED = 0x400000 - MNT_CMDFLAGS = 0xf0000 - MNT_CPROTECT = 0x80 - MNT_DEFWRITE = 0x2000000 - MNT_DONTBROWSE = 0x100000 - MNT_DOVOLFS = 0x8000 - MNT_DWAIT = 0x4 - MNT_EXPORTED = 0x100 - MNT_FORCE = 0x80000 - MNT_IGNORE_OWNERSHIP = 0x200000 - MNT_JOURNALED = 0x800000 - MNT_LOCAL = 0x1000 - MNT_MULTILABEL = 0x4000000 - MNT_NOATIME = 0x10000000 - 
MNT_NOBLOCK = 0x20000 - MNT_NODEV = 0x10 - MNT_NOEXEC = 0x4 - MNT_NOSUID = 0x8 - MNT_NOUSERXATTR = 0x1000000 - MNT_NOWAIT = 0x2 - MNT_QUARANTINE = 0x400 - MNT_QUOTA = 0x2000 - MNT_RDONLY = 0x1 - MNT_RELOAD = 0x40000 - MNT_ROOTFS = 0x4000 - MNT_SYNCHRONOUS = 0x2 - MNT_UNION = 0x20 - MNT_UNKNOWNPERMISSIONS = 0x200000 - MNT_UPDATE = 0x10000 - MNT_VISFLAGMASK = 0x17f0f5ff - MNT_WAIT = 0x1 MSG_CTRUNC = 0x20 MSG_DONTROUTE = 0x4 MSG_DONTWAIT = 0x80 @@ -884,13 +682,7 @@ const ( NET_RT_MAXID = 0xa NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 - NL0 = 0x0 - NL1 = 0x100 - NL2 = 0x200 - NL3 = 0x300 - NLDLY = 0x300 NOFLSH = 0x80000000 - NOKERNINFO = 0x2000000 NOTE_ABSOLUTE = 0x8 NOTE_ATTRIB = 0x8 NOTE_BACKGROUND = 0x40 @@ -914,14 +706,11 @@ const ( NOTE_FFNOP = 0x0 NOTE_FFOR = 0x80000000 NOTE_FORK = 0x40000000 - NOTE_FUNLOCK = 0x100 NOTE_LEEWAY = 0x10 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 - NOTE_MACH_CONTINUOUS_TIME = 0x80 NOTE_NONE = 0x80 NOTE_NSECONDS = 0x4 - NOTE_OOB = 0x2 NOTE_PCTRLMASK = -0x100000 NOTE_PDATAMASK = 0xfffff NOTE_REAP = 0x10000000 @@ -946,7 +735,6 @@ const ( ONOCR = 0x20 ONOEOT = 0x8 OPOST = 0x1 - OXTABS = 0x4 O_ACCMODE = 0x3 O_ALERT = 0x20000000 O_APPEND = 0x8 @@ -955,7 +743,6 @@ const ( O_CREAT = 0x200 O_DIRECTORY = 0x100000 O_DP_GETRAWENCRYPTED = 0x1 - O_DP_GETRAWUNENCRYPTED = 0x2 O_DSYNC = 0x400000 O_EVTONLY = 0x8000 O_EXCL = 0x800 @@ -1008,10 +795,7 @@ const ( RLIMIT_CPU_USAGE_MONITOR = 0x2 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 - RLIMIT_MEMLOCK = 0x6 RLIMIT_NOFILE = 0x8 - RLIMIT_NPROC = 0x7 - RLIMIT_RSS = 0x5 RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 @@ -1046,7 +830,6 @@ const ( RTF_LOCAL = 0x200000 RTF_MODIFIED = 0x20 RTF_MULTICAST = 0x800000 - RTF_NOIFREF = 0x2000 RTF_PINNED = 0x100000 RTF_PRCLONING = 0x10000 RTF_PROTO1 = 0x8000 @@ -1181,8 +964,6 @@ const ( SO_LABEL = 0x1010 SO_LINGER = 0x80 SO_LINGER_SEC = 0x1080 - SO_NETSVC_MARKING_LEVEL = 0x1119 - SO_NET_SERVICE_TYPE = 0x1116 SO_NKE = 0x1021 SO_NOADDRERR = 0x1023 SO_NOSIGPIPE = 0x1022 @@ -1238,22 +1019,11 @@ const ( S_IXGRP = 0x8 S_IXOTH = 0x1 S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x400 - TAB2 = 0x800 - TAB3 = 0x4 - TABDLY = 0xc04 TCIFLUSH = 0x1 - TCIOFF = 0x3 TCIOFLUSH = 0x3 - TCION = 0x4 TCOFLUSH = 0x2 - TCOOFF = 0x1 - TCOON = 0x2 TCP_CONNECTIONTIMEOUT = 0x20 - TCP_CONNECTION_INFO = 0x106 TCP_ENABLE_ECN = 0x104 - TCP_FASTOPEN = 0x105 TCP_KEEPALIVE = 0x10 TCP_KEEPCNT = 0x102 TCP_KEEPINTVL = 0x101 @@ -1353,11 +1123,6 @@ const ( VKILL = 0x5 VLNEXT = 0xe VMIN = 0x10 - VM_LOADAVG = 0x2 - VM_MACHFACTOR = 0x4 - VM_MAXID = 0x6 - VM_METER = 0x1 - VM_SWAPUSAGE = 0x5 VQUIT = 0x9 VREPRINT = 0x6 VSTART = 0xc @@ -1526,148 +1291,3 @@ const ( SIGXCPU = syscall.Signal(0x18) SIGXFSZ = syscall.Signal(0x19) ) - -// Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "resource busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: 
"no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "operation timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "device power is off", - 83: "device error", - 84: "value too large to be stored in data type", - 85: "bad executable (or shared library)", - 86: "bad CPU type in executable", - 87: "shared library version mismatch", - 88: "malformed Mach-o file", - 89: "operation canceled", - 90: "identifier removed", - 91: "no message of desired type", - 92: "illegal byte sequence", - 93: "attribute not found", - 94: "bad message", - 95: "EMULTIHOP (Reserved)", - 96: "no message available on STREAM", - 97: "ENOLINK (Reserved)", - 98: "no STREAM resources", - 99: "not a STREAM", - 100: "protocol error", - 101: "STREAM ioctl timeout", - 102: "operation not supported on socket", - 103: "policy not found", - 104: "state not recoverable", - 105: "previous owner died", - 106: "interface output queue is full", -} - -// Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "suspended (signal)", - 18: "suspended", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", -} diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index 
cc8cc5b57c5..3189c6b3459 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -1,5 +1,5 @@ // mkerrors.sh -m64 -// Code generated by the command above; see README.md. DO NOT EDIT. +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build arm64,darwin @@ -48,7 +48,6 @@ const ( AF_UNIX = 0x1 AF_UNSPEC = 0x0 AF_UTUN = 0x26 - ALTWERASE = 0x200 B0 = 0x0 B110 = 0x6e B115200 = 0x1c200 @@ -139,26 +138,9 @@ const ( BPF_W = 0x0 BPF_X = 0x8 BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x8000 - BSDLY = 0x8000 CFLUSH = 0xf CLOCAL = 0x8000 - CLOCK_MONOTONIC = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_MONOTONIC_RAW_APPROX = 0x5 - CLOCK_PROCESS_CPUTIME_ID = 0xc - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x10 - CLOCK_UPTIME_RAW = 0x8 - CLOCK_UPTIME_RAW_APPROX = 0x9 - CR0 = 0x0 - CR1 = 0x1000 - CR2 = 0x2000 - CR3 = 0x3000 - CRDLY = 0x3000 CREAD = 0x800 - CRTSCTS = 0x30000 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 @@ -350,14 +332,13 @@ const ( ECHONL = 0x10 ECHOPRT = 0x20 EVFILT_AIO = -0x3 - EVFILT_EXCEPT = -0xf EVFILT_FS = -0x9 EVFILT_MACHPORT = -0x8 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xf - EVFILT_THREADMARKER = 0xf + EVFILT_SYSCOUNT = 0xe + EVFILT_THREADMARKER = 0xe EVFILT_TIMER = -0x7 EVFILT_USER = -0xa EVFILT_VM = -0xc @@ -368,7 +349,6 @@ const ( EV_DELETE = 0x2 EV_DISABLE = 0x8 EV_DISPATCH = 0x80 - EV_DISPATCH2 = 0x180 EV_ENABLE = 0x4 EV_EOF = 0x8000 EV_ERROR = 0x4000 @@ -379,25 +359,16 @@ const ( EV_POLL = 0x1000 EV_RECEIPT = 0x40 EV_SYSFLAGS = 0xf000 - EV_UDATA_SPECIFIC = 0x100 - EV_VANISHED = 0x200 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x4000 - FFDLY = 0x4000 FLUSHO = 0x800000 F_ADDFILESIGS = 0x3d - F_ADDFILESIGS_FOR_DYLD_SIM = 0x53 - F_ADDFILESIGS_RETURN = 0x61 F_ADDSIGS = 0x3b F_ALLOCATEALL = 0x4 F_ALLOCATECONTIG = 0x2 - F_BARRIERFSYNC = 0x55 - F_CHECK_LV = 0x62 F_CHKCLEAN = 0x29 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x43 @@ -799,13 +770,11 @@ const ( MADV_FREE_REUSABLE = 0x7 MADV_FREE_REUSE = 0x8 MADV_NORMAL = 0x0 - MADV_PAGEOUT = 0xa MADV_RANDOM = 0x1 MADV_SEQUENTIAL = 0x2 MADV_WILLNEED = 0x3 MADV_ZERO_WIRED_PAGES = 0x6 MAP_ANON = 0x1000 - MAP_ANONYMOUS = 0x1000 MAP_COPY = 0x2 MAP_FILE = 0x0 MAP_FIXED = 0x10 @@ -817,43 +786,9 @@ const ( MAP_PRIVATE = 0x2 MAP_RENAME = 0x20 MAP_RESERVED0080 = 0x80 - MAP_RESILIENT_CODESIGN = 0x2000 - MAP_RESILIENT_MEDIA = 0x4000 MAP_SHARED = 0x1 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 - MNT_ASYNC = 0x40 - MNT_AUTOMOUNTED = 0x400000 - MNT_CMDFLAGS = 0xf0000 - MNT_CPROTECT = 0x80 - MNT_DEFWRITE = 0x2000000 - MNT_DONTBROWSE = 0x100000 - MNT_DOVOLFS = 0x8000 - MNT_DWAIT = 0x4 - MNT_EXPORTED = 0x100 - MNT_FORCE = 0x80000 - MNT_IGNORE_OWNERSHIP = 0x200000 - MNT_JOURNALED = 0x800000 - MNT_LOCAL = 0x1000 - MNT_MULTILABEL = 0x4000000 - MNT_NOATIME = 0x10000000 - MNT_NOBLOCK = 0x20000 - MNT_NODEV = 0x10 - MNT_NOEXEC = 0x4 - MNT_NOSUID = 0x8 - MNT_NOUSERXATTR = 0x1000000 - MNT_NOWAIT = 0x2 - MNT_QUARANTINE = 0x400 - MNT_QUOTA = 0x2000 - MNT_RDONLY = 0x1 - MNT_RELOAD = 0x40000 - MNT_ROOTFS = 0x4000 - MNT_SYNCHRONOUS = 0x2 - MNT_UNION = 0x20 - MNT_UNKNOWNPERMISSIONS = 0x200000 - MNT_UPDATE = 0x10000 - MNT_VISFLAGMASK = 0x17f0f5ff - MNT_WAIT = 0x1 MSG_CTRUNC = 0x20 MSG_DONTROUTE = 0x4 MSG_DONTWAIT = 0x80 @@ -884,13 +819,7 @@ const ( NET_RT_MAXID = 0xa NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 - NL0 = 0x0 - NL1 = 0x100 - NL2 = 0x200 - NL3 = 0x300 - NLDLY = 0x300 NOFLSH = 0x80000000 - NOKERNINFO = 0x2000000 NOTE_ABSOLUTE = 
0x8 NOTE_ATTRIB = 0x8 NOTE_BACKGROUND = 0x40 @@ -914,14 +843,11 @@ const ( NOTE_FFNOP = 0x0 NOTE_FFOR = 0x80000000 NOTE_FORK = 0x40000000 - NOTE_FUNLOCK = 0x100 NOTE_LEEWAY = 0x10 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 - NOTE_MACH_CONTINUOUS_TIME = 0x80 NOTE_NONE = 0x80 NOTE_NSECONDS = 0x4 - NOTE_OOB = 0x2 NOTE_PCTRLMASK = -0x100000 NOTE_PDATAMASK = 0xfffff NOTE_REAP = 0x10000000 @@ -946,7 +872,6 @@ const ( ONOCR = 0x20 ONOEOT = 0x8 OPOST = 0x1 - OXTABS = 0x4 O_ACCMODE = 0x3 O_ALERT = 0x20000000 O_APPEND = 0x8 @@ -955,7 +880,6 @@ const ( O_CREAT = 0x200 O_DIRECTORY = 0x100000 O_DP_GETRAWENCRYPTED = 0x1 - O_DP_GETRAWUNENCRYPTED = 0x2 O_DSYNC = 0x400000 O_EVTONLY = 0x8000 O_EXCL = 0x800 @@ -1008,10 +932,7 @@ const ( RLIMIT_CPU_USAGE_MONITOR = 0x2 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 - RLIMIT_MEMLOCK = 0x6 RLIMIT_NOFILE = 0x8 - RLIMIT_NPROC = 0x7 - RLIMIT_RSS = 0x5 RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 @@ -1181,8 +1102,6 @@ const ( SO_LABEL = 0x1010 SO_LINGER = 0x80 SO_LINGER_SEC = 0x1080 - SO_NETSVC_MARKING_LEVEL = 0x1119 - SO_NET_SERVICE_TYPE = 0x1116 SO_NKE = 0x1021 SO_NOADDRERR = 0x1023 SO_NOSIGPIPE = 0x1022 @@ -1238,22 +1157,11 @@ const ( S_IXGRP = 0x8 S_IXOTH = 0x1 S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x400 - TAB2 = 0x800 - TAB3 = 0x4 - TABDLY = 0xc04 TCIFLUSH = 0x1 - TCIOFF = 0x3 TCIOFLUSH = 0x3 - TCION = 0x4 TCOFLUSH = 0x2 - TCOOFF = 0x1 - TCOON = 0x2 TCP_CONNECTIONTIMEOUT = 0x20 - TCP_CONNECTION_INFO = 0x106 TCP_ENABLE_ECN = 0x104 - TCP_FASTOPEN = 0x105 TCP_KEEPALIVE = 0x10 TCP_KEEPCNT = 0x102 TCP_KEEPINTVL = 0x101 @@ -1353,11 +1261,6 @@ const ( VKILL = 0x5 VLNEXT = 0xe VMIN = 0x10 - VM_LOADAVG = 0x2 - VM_MACHFACTOR = 0x4 - VM_MAXID = 0x6 - VM_METER = 0x1 - VM_SWAPUSAGE = 0x5 VQUIT = 0x9 VREPRINT = 0x6 VSTART = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 1d3eec44d49..7b95751c3db 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -1,5 +1,5 @@ // mkerrors.sh -m32 -// Code generated by the command above; see README.md. DO NOT EDIT. 
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build 386,freebsd @@ -11,1419 +11,1456 @@ package unix import "syscall" const ( - AF_APPLETALK = 0x10 - AF_ARP = 0x23 - AF_ATM = 0x1e - AF_BLUETOOTH = 0x24 - AF_CCITT = 0xa - AF_CHAOS = 0x5 - AF_CNT = 0x15 - AF_COIP = 0x14 - AF_DATAKIT = 0x9 - AF_DECnet = 0xc - AF_DLI = 0xd - AF_E164 = 0x1a - AF_ECMA = 0x8 - AF_HYLINK = 0xf - AF_IEEE80211 = 0x25 - AF_IMPLINK = 0x3 - AF_INET = 0x2 - AF_INET6 = 0x1c - AF_INET6_SDP = 0x2a - AF_INET_SDP = 0x28 - AF_IPX = 0x17 - AF_ISDN = 0x1a - AF_ISO = 0x7 - AF_LAT = 0xe - AF_LINK = 0x12 - AF_LOCAL = 0x1 - AF_MAX = 0x2a - AF_NATM = 0x1d - AF_NETBIOS = 0x6 - AF_NETGRAPH = 0x20 - AF_OSI = 0x7 - AF_PUP = 0x4 - AF_ROUTE = 0x11 - AF_SCLUSTER = 0x22 - AF_SIP = 0x18 - AF_SLOW = 0x21 - AF_SNA = 0xb - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VENDOR00 = 0x27 - AF_VENDOR01 = 0x29 - AF_VENDOR02 = 0x2b - AF_VENDOR03 = 0x2d - AF_VENDOR04 = 0x2f - AF_VENDOR05 = 0x31 - AF_VENDOR06 = 0x33 - AF_VENDOR07 = 0x35 - AF_VENDOR08 = 0x37 - AF_VENDOR09 = 0x39 - AF_VENDOR10 = 0x3b - AF_VENDOR11 = 0x3d - AF_VENDOR12 = 0x3f - AF_VENDOR13 = 0x41 - AF_VENDOR14 = 0x43 - AF_VENDOR15 = 0x45 - AF_VENDOR16 = 0x47 - AF_VENDOR17 = 0x49 - AF_VENDOR18 = 0x4b - AF_VENDOR19 = 0x4d - AF_VENDOR20 = 0x4f - AF_VENDOR21 = 0x51 - AF_VENDOR22 = 0x53 - AF_VENDOR23 = 0x55 - AF_VENDOR24 = 0x57 - AF_VENDOR25 = 0x59 - AF_VENDOR26 = 0x5b - AF_VENDOR27 = 0x5d - AF_VENDOR28 = 0x5f - AF_VENDOR29 = 0x61 - AF_VENDOR30 = 0x63 - AF_VENDOR31 = 0x65 - AF_VENDOR32 = 0x67 - AF_VENDOR33 = 0x69 - AF_VENDOR34 = 0x6b - AF_VENDOR35 = 0x6d - AF_VENDOR36 = 0x6f - AF_VENDOR37 = 0x71 - AF_VENDOR38 = 0x73 - AF_VENDOR39 = 0x75 - AF_VENDOR40 = 0x77 - AF_VENDOR41 = 0x79 - AF_VENDOR42 = 0x7b - AF_VENDOR43 = 0x7d - AF_VENDOR44 = 0x7f - AF_VENDOR45 = 0x81 - AF_VENDOR46 = 0x83 - AF_VENDOR47 = 0x85 - ALTWERASE = 0x200 - B0 = 0x0 - B110 = 0x6e - B115200 = 0x1c200 - B1200 = 0x4b0 - B134 = 0x86 - B14400 = 0x3840 - B150 = 0x96 - B1800 = 0x708 - B19200 = 0x4b00 - B200 = 0xc8 - B230400 = 0x38400 - B2400 = 0x960 - B28800 = 0x7080 - B300 = 0x12c - B38400 = 0x9600 - B460800 = 0x70800 - B4800 = 0x12c0 - B50 = 0x32 - B57600 = 0xe100 - B600 = 0x258 - B7200 = 0x1c20 - B75 = 0x4b - B76800 = 0x12c00 - B921600 = 0xe1000 - B9600 = 0x2580 - BIOCFEEDBACK = 0x8004427c - BIOCFLUSH = 0x20004268 - BIOCGBLEN = 0x40044266 - BIOCGDIRECTION = 0x40044276 - BIOCGDLT = 0x4004426a - BIOCGDLTLIST = 0xc0084279 - BIOCGETBUFMODE = 0x4004427d - BIOCGETIF = 0x4020426b - BIOCGETZMAX = 0x4004427f - BIOCGHDRCMPLT = 0x40044274 - BIOCGRSIG = 0x40044272 - BIOCGRTIMEOUT = 0x4008426e - BIOCGSEESENT = 0x40044276 - BIOCGSTATS = 0x4008426f - BIOCGTSTAMP = 0x40044283 - BIOCIMMEDIATE = 0x80044270 - BIOCLOCK = 0x2000427a - BIOCPROMISC = 0x20004269 - BIOCROTZBUF = 0x400c4280 - BIOCSBLEN = 0xc0044266 - BIOCSDIRECTION = 0x80044277 - BIOCSDLT = 0x80044278 - BIOCSETBUFMODE = 0x8004427e - BIOCSETF = 0x80084267 - BIOCSETFNR = 0x80084282 - BIOCSETIF = 0x8020426c - BIOCSETWF = 0x8008427b - BIOCSETZBUF = 0x800c4281 - BIOCSHDRCMPLT = 0x80044275 - BIOCSRSIG = 0x80044273 - BIOCSRTIMEOUT = 0x8008426d - BIOCSSEESENT = 0x80044277 - BIOCSTSTAMP = 0x80044284 - BIOCVERSION = 0x40044271 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ALIGNMENT = 0x4 - BPF_ALU = 0x4 - BPF_AND = 0x50 - BPF_B = 0x10 - BPF_BUFMODE_BUFFER = 0x1 - BPF_BUFMODE_ZBUF = 0x2 - BPF_DIV = 0x30 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JMP = 0x5 - BPF_JSET = 0x40 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 
0x1 - BPF_LEN = 0x80 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXBUFSIZE = 0x80000 - BPF_MAXINSNS = 0x200 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINBUFSIZE = 0x20 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_OR = 0x40 - BPF_RELEASE = 0x30bb6 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAX = 0x0 - BPF_TXA = 0x80 - BPF_T_BINTIME = 0x2 - BPF_T_BINTIME_FAST = 0x102 - BPF_T_BINTIME_MONOTONIC = 0x202 - BPF_T_BINTIME_MONOTONIC_FAST = 0x302 - BPF_T_FAST = 0x100 - BPF_T_FLAG_MASK = 0x300 - BPF_T_FORMAT_MASK = 0x3 - BPF_T_MICROTIME = 0x0 - BPF_T_MICROTIME_FAST = 0x100 - BPF_T_MICROTIME_MONOTONIC = 0x200 - BPF_T_MICROTIME_MONOTONIC_FAST = 0x300 - BPF_T_MONOTONIC = 0x200 - BPF_T_MONOTONIC_FAST = 0x300 - BPF_T_NANOTIME = 0x1 - BPF_T_NANOTIME_FAST = 0x101 - BPF_T_NANOTIME_MONOTONIC = 0x201 - BPF_T_NANOTIME_MONOTONIC_FAST = 0x301 - BPF_T_NONE = 0x3 - BPF_T_NORMAL = 0x0 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XOR = 0xa0 - BRKINT = 0x2 - CAP_ACCEPT = 0x200000020000000 - CAP_ACL_CHECK = 0x400000000010000 - CAP_ACL_DELETE = 0x400000000020000 - CAP_ACL_GET = 0x400000000040000 - CAP_ACL_SET = 0x400000000080000 - CAP_ALL0 = 0x20007ffffffffff - CAP_ALL1 = 0x4000000001fffff - CAP_BIND = 0x200000040000000 - CAP_BINDAT = 0x200008000000400 - CAP_CHFLAGSAT = 0x200000000001400 - CAP_CONNECT = 0x200000080000000 - CAP_CONNECTAT = 0x200010000000400 - CAP_CREATE = 0x200000000000040 - CAP_EVENT = 0x400000000000020 - CAP_EXTATTR_DELETE = 0x400000000001000 - CAP_EXTATTR_GET = 0x400000000002000 - CAP_EXTATTR_LIST = 0x400000000004000 - CAP_EXTATTR_SET = 0x400000000008000 - CAP_FCHDIR = 0x200000000000800 - CAP_FCHFLAGS = 0x200000000001000 - CAP_FCHMOD = 0x200000000002000 - CAP_FCHMODAT = 0x200000000002400 - CAP_FCHOWN = 0x200000000004000 - CAP_FCHOWNAT = 0x200000000004400 - CAP_FCNTL = 0x200000000008000 - CAP_FCNTL_ALL = 0x78 - CAP_FCNTL_GETFL = 0x8 - CAP_FCNTL_GETOWN = 0x20 - CAP_FCNTL_SETFL = 0x10 - CAP_FCNTL_SETOWN = 0x40 - CAP_FEXECVE = 0x200000000000080 - CAP_FLOCK = 0x200000000010000 - CAP_FPATHCONF = 0x200000000020000 - CAP_FSCK = 0x200000000040000 - CAP_FSTAT = 0x200000000080000 - CAP_FSTATAT = 0x200000000080400 - CAP_FSTATFS = 0x200000000100000 - CAP_FSYNC = 0x200000000000100 - CAP_FTRUNCATE = 0x200000000000200 - CAP_FUTIMES = 0x200000000200000 - CAP_FUTIMESAT = 0x200000000200400 - CAP_GETPEERNAME = 0x200000100000000 - CAP_GETSOCKNAME = 0x200000200000000 - CAP_GETSOCKOPT = 0x200000400000000 - CAP_IOCTL = 0x400000000000080 - CAP_IOCTLS_ALL = 0x7fffffff - CAP_KQUEUE = 0x400000000100040 - CAP_KQUEUE_CHANGE = 0x400000000100000 - CAP_KQUEUE_EVENT = 0x400000000000040 - CAP_LINKAT_SOURCE = 0x200020000000400 - CAP_LINKAT_TARGET = 0x200000000400400 - CAP_LISTEN = 0x200000800000000 - CAP_LOOKUP = 0x200000000000400 - CAP_MAC_GET = 0x400000000000001 - CAP_MAC_SET = 0x400000000000002 - CAP_MKDIRAT = 0x200000000800400 - CAP_MKFIFOAT = 0x200000001000400 - CAP_MKNODAT = 0x200000002000400 - CAP_MMAP = 0x200000000000010 - CAP_MMAP_R = 0x20000000000001d - CAP_MMAP_RW = 0x20000000000001f - CAP_MMAP_RWX = 0x20000000000003f - CAP_MMAP_RX = 0x20000000000003d - CAP_MMAP_W = 0x20000000000001e - CAP_MMAP_WX = 0x20000000000003e - CAP_MMAP_X = 0x20000000000003c - CAP_PDGETPID = 0x400000000000200 - CAP_PDKILL = 0x400000000000800 - CAP_PDWAIT = 0x400000000000400 - CAP_PEELOFF = 0x200001000000000 - CAP_POLL_EVENT = 0x400000000000020 - CAP_PREAD = 0x20000000000000d - CAP_PWRITE = 0x20000000000000e - CAP_READ = 0x200000000000001 - 
CAP_RECV = 0x200000000000001 - CAP_RENAMEAT_SOURCE = 0x200000004000400 - CAP_RENAMEAT_TARGET = 0x200040000000400 - CAP_RIGHTS_VERSION = 0x0 - CAP_RIGHTS_VERSION_00 = 0x0 - CAP_SEEK = 0x20000000000000c - CAP_SEEK_TELL = 0x200000000000004 - CAP_SEM_GETVALUE = 0x400000000000004 - CAP_SEM_POST = 0x400000000000008 - CAP_SEM_WAIT = 0x400000000000010 - CAP_SEND = 0x200000000000002 - CAP_SETSOCKOPT = 0x200002000000000 - CAP_SHUTDOWN = 0x200004000000000 - CAP_SOCK_CLIENT = 0x200007780000003 - CAP_SOCK_SERVER = 0x200007f60000003 - CAP_SYMLINKAT = 0x200000008000400 - CAP_TTYHOOK = 0x400000000000100 - CAP_UNLINKAT = 0x200000010000400 - CAP_UNUSED0_44 = 0x200080000000000 - CAP_UNUSED0_57 = 0x300000000000000 - CAP_UNUSED1_22 = 0x400000000200000 - CAP_UNUSED1_57 = 0x500000000000000 - CAP_WRITE = 0x200000000000002 - CFLUSH = 0xf - CLOCAL = 0x8000 - CLOCK_MONOTONIC = 0x4 - CLOCK_MONOTONIC_FAST = 0xc - CLOCK_MONOTONIC_PRECISE = 0xb - CLOCK_PROCESS_CPUTIME_ID = 0xf - CLOCK_PROF = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_FAST = 0xa - CLOCK_REALTIME_PRECISE = 0x9 - CLOCK_SECOND = 0xd - CLOCK_THREAD_CPUTIME_ID = 0xe - CLOCK_UPTIME = 0x5 - CLOCK_UPTIME_FAST = 0x8 - CLOCK_UPTIME_PRECISE = 0x7 - CLOCK_VIRTUAL = 0x1 - CREAD = 0x800 - CRTSCTS = 0x30000 - CS5 = 0x0 - CS6 = 0x100 - CS7 = 0x200 - CS8 = 0x300 - CSIZE = 0x300 - CSTART = 0x11 - CSTATUS = 0x14 - CSTOP = 0x13 - CSTOPB = 0x400 - CSUSP = 0x1a - CTL_MAXNAME = 0x18 - CTL_NET = 0x4 - DLT_A429 = 0xb8 - DLT_A653_ICM = 0xb9 - DLT_AIRONET_HEADER = 0x78 - DLT_AOS = 0xde - DLT_APPLE_IP_OVER_IEEE1394 = 0x8a - DLT_ARCNET = 0x7 - DLT_ARCNET_LINUX = 0x81 - DLT_ATM_CLIP = 0x13 - DLT_ATM_RFC1483 = 0xb - DLT_AURORA = 0x7e - DLT_AX25 = 0x3 - DLT_AX25_KISS = 0xca - DLT_BACNET_MS_TP = 0xa5 - DLT_BLUETOOTH_BREDR_BB = 0xff - DLT_BLUETOOTH_HCI_H4 = 0xbb - DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 - DLT_BLUETOOTH_LE_LL = 0xfb - DLT_BLUETOOTH_LE_LL_WITH_PHDR = 0x100 - DLT_BLUETOOTH_LINUX_MONITOR = 0xfe - DLT_CAN20B = 0xbe - DLT_CAN_SOCKETCAN = 0xe3 - DLT_CHAOS = 0x5 - DLT_CHDLC = 0x68 - DLT_CISCO_IOS = 0x76 - DLT_C_HDLC = 0x68 - DLT_C_HDLC_WITH_DIR = 0xcd - DLT_DBUS = 0xe7 - DLT_DECT = 0xdd - DLT_DOCSIS = 0x8f - DLT_DVB_CI = 0xeb - DLT_ECONET = 0x73 - DLT_EN10MB = 0x1 - DLT_EN3MB = 0x2 - DLT_ENC = 0x6d - DLT_EPON = 0x103 - DLT_ERF = 0xc5 - DLT_ERF_ETH = 0xaf - DLT_ERF_POS = 0xb0 - DLT_FC_2 = 0xe0 - DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 - DLT_FDDI = 0xa - DLT_FLEXRAY = 0xd2 - DLT_FRELAY = 0x6b - DLT_FRELAY_WITH_DIR = 0xce - DLT_GCOM_SERIAL = 0xad - DLT_GCOM_T1E1 = 0xac - DLT_GPF_F = 0xab - DLT_GPF_T = 0xaa - DLT_GPRS_LLC = 0xa9 - DLT_GSMTAP_ABIS = 0xda - DLT_GSMTAP_UM = 0xd9 - DLT_HHDLC = 0x79 - DLT_IBM_SN = 0x92 - DLT_IBM_SP = 0x91 - DLT_IEEE802 = 0x6 - DLT_IEEE802_11 = 0x69 - DLT_IEEE802_11_RADIO = 0x7f - DLT_IEEE802_11_RADIO_AVS = 0xa3 - DLT_IEEE802_15_4 = 0xc3 - DLT_IEEE802_15_4_LINUX = 0xbf - DLT_IEEE802_15_4_NOFCS = 0xe6 - DLT_IEEE802_15_4_NONASK_PHY = 0xd7 - DLT_IEEE802_16_MAC_CPS = 0xbc - DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 - DLT_INFINIBAND = 0xf7 - DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 - DLT_IPMB_LINUX = 0xd1 - DLT_IPMI_HPM_2 = 0x104 - DLT_IPNET = 0xe2 - DLT_IPOIB = 0xf2 - DLT_IPV4 = 0xe4 - DLT_IPV6 = 0xe5 - DLT_IP_OVER_FC = 0x7a - DLT_JUNIPER_ATM1 = 0x89 - DLT_JUNIPER_ATM2 = 0x87 - DLT_JUNIPER_ATM_CEMIC = 0xee - DLT_JUNIPER_CHDLC = 0xb5 - DLT_JUNIPER_ES = 0x84 - DLT_JUNIPER_ETHER = 0xb2 - DLT_JUNIPER_FIBRECHANNEL = 0xea - DLT_JUNIPER_FRELAY = 0xb4 - DLT_JUNIPER_GGSN = 0x85 - DLT_JUNIPER_ISM = 0xc2 - DLT_JUNIPER_MFR = 0x86 - DLT_JUNIPER_MLFR = 0x83 - DLT_JUNIPER_MLPPP = 0x82 - 
DLT_JUNIPER_MONITOR = 0xa4 - DLT_JUNIPER_PIC_PEER = 0xae - DLT_JUNIPER_PPP = 0xb3 - DLT_JUNIPER_PPPOE = 0xa7 - DLT_JUNIPER_PPPOE_ATM = 0xa8 - DLT_JUNIPER_SERVICES = 0x88 - DLT_JUNIPER_SRX_E2E = 0xe9 - DLT_JUNIPER_ST = 0xc8 - DLT_JUNIPER_VP = 0xb7 - DLT_JUNIPER_VS = 0xe8 - DLT_LAPB_WITH_DIR = 0xcf - DLT_LAPD = 0xcb - DLT_LIN = 0xd4 - DLT_LINUX_EVDEV = 0xd8 - DLT_LINUX_IRDA = 0x90 - DLT_LINUX_LAPD = 0xb1 - DLT_LINUX_PPP_WITHDIRECTION = 0xa6 - DLT_LINUX_SLL = 0x71 - DLT_LOOP = 0x6c - DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x104 - DLT_MATCHING_MIN = 0x68 - DLT_MFR = 0xb6 - DLT_MOST = 0xd3 - DLT_MPEG_2_TS = 0xf3 - DLT_MPLS = 0xdb - DLT_MTP2 = 0x8c - DLT_MTP2_WITH_PHDR = 0x8b - DLT_MTP3 = 0x8d - DLT_MUX27010 = 0xec - DLT_NETANALYZER = 0xf0 - DLT_NETANALYZER_TRANSPARENT = 0xf1 - DLT_NETLINK = 0xfd - DLT_NFC_LLCP = 0xf5 - DLT_NFLOG = 0xef - DLT_NG40 = 0xf4 - DLT_NULL = 0x0 - DLT_PCI_EXP = 0x7d - DLT_PFLOG = 0x75 - DLT_PFSYNC = 0x79 - DLT_PKTAP = 0x102 - DLT_PPI = 0xc0 - DLT_PPP = 0x9 - DLT_PPP_BSDOS = 0x10 - DLT_PPP_ETHER = 0x33 - DLT_PPP_PPPD = 0xa6 - DLT_PPP_SERIAL = 0x32 - DLT_PPP_WITH_DIR = 0xcc - DLT_PPP_WITH_DIRECTION = 0xa6 - DLT_PRISM_HEADER = 0x77 - DLT_PROFIBUS_DL = 0x101 - DLT_PRONET = 0x4 - DLT_RAIF1 = 0xc6 - DLT_RAW = 0xc - DLT_RIO = 0x7c - DLT_RTAC_SERIAL = 0xfa - DLT_SCCP = 0x8e - DLT_SCTP = 0xf8 - DLT_SITA = 0xc4 - DLT_SLIP = 0x8 - DLT_SLIP_BSDOS = 0xf - DLT_STANAG_5066_D_PDU = 0xed - DLT_SUNATM = 0x7b - DLT_SYMANTEC_FIREWALL = 0x63 - DLT_TZSP = 0x80 - DLT_USB = 0xba - DLT_USBPCAP = 0xf9 - DLT_USB_LINUX = 0xbd - DLT_USB_LINUX_MMAPPED = 0xdc - DLT_USER0 = 0x93 - DLT_USER1 = 0x94 - DLT_USER10 = 0x9d - DLT_USER11 = 0x9e - DLT_USER12 = 0x9f - DLT_USER13 = 0xa0 - DLT_USER14 = 0xa1 - DLT_USER15 = 0xa2 - DLT_USER2 = 0x95 - DLT_USER3 = 0x96 - DLT_USER4 = 0x97 - DLT_USER5 = 0x98 - DLT_USER6 = 0x99 - DLT_USER7 = 0x9a - DLT_USER8 = 0x9b - DLT_USER9 = 0x9c - DLT_WIHART = 0xdf - DLT_WIRESHARK_UPPER_PDU = 0xfc - DLT_X2E_SERIAL = 0xd5 - DLT_X2E_XORAYA = 0xd6 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x40 - ECHOE = 0x2 - ECHOK = 0x4 - ECHOKE = 0x1 - ECHONL = 0x10 - ECHOPRT = 0x20 - EVFILT_AIO = -0x3 - EVFILT_FS = -0x9 - EVFILT_LIO = -0xa - EVFILT_PROC = -0x5 - EVFILT_PROCDESC = -0x8 - EVFILT_READ = -0x1 - EVFILT_SENDFILE = -0xc - EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xc - EVFILT_TIMER = -0x7 - EVFILT_USER = -0xb - EVFILT_VNODE = -0x4 - EVFILT_WRITE = -0x2 - EV_ADD = 0x1 - EV_CLEAR = 0x20 - EV_DELETE = 0x2 - EV_DISABLE = 0x8 - EV_DISPATCH = 0x80 - EV_DROP = 0x1000 - EV_ENABLE = 0x4 - EV_EOF = 0x8000 - EV_ERROR = 0x4000 - EV_FLAG1 = 0x2000 - EV_FLAG2 = 0x4000 - EV_FORCEONESHOT = 0x100 - EV_ONESHOT = 0x10 - EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 - EXTA = 0x4b00 - EXTATTR_NAMESPACE_EMPTY = 0x0 - EXTATTR_NAMESPACE_SYSTEM = 0x2 - EXTATTR_NAMESPACE_USER = 0x1 - EXTB = 0x9600 - EXTPROC = 0x800 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FLUSHO = 0x800000 - F_CANCEL = 0x5 - F_DUP2FD = 0xa - F_DUP2FD_CLOEXEC = 0x12 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x11 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLK = 0xb - F_GETOWN = 0x5 - F_OGETLK = 0x7 - F_OK = 0x0 - F_OSETLK = 0x8 - F_OSETLKW = 0x9 - F_RDAHEAD = 0x10 - F_RDLCK = 0x1 - F_READAHEAD = 0xf - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLK = 0xc - F_SETLKW = 0xd - F_SETLK_REMOTE = 0xe - F_SETOWN = 0x6 - F_UNLCK = 0x2 - F_UNLCKSYS = 0x4 - F_WRLCK = 0x3 - HUPCL = 0x4000 - ICANON = 0x100 - ICMP6_FILTER = 0x12 - ICRNL = 0x100 - IEXTEN = 0x400 - 
IFAN_ARRIVAL = 0x0 - IFAN_DEPARTURE = 0x1 - IFF_ALLMULTI = 0x200 - IFF_ALTPHYS = 0x4000 - IFF_BROADCAST = 0x2 - IFF_CANTCHANGE = 0x218f52 - IFF_CANTCONFIG = 0x10000 - IFF_DEBUG = 0x4 - IFF_DRV_OACTIVE = 0x400 - IFF_DRV_RUNNING = 0x40 - IFF_DYING = 0x200000 - IFF_LINK0 = 0x1000 - IFF_LINK1 = 0x2000 - IFF_LINK2 = 0x4000 - IFF_LOOPBACK = 0x8 - IFF_MONITOR = 0x40000 - IFF_MULTICAST = 0x8000 - IFF_NOARP = 0x80 - IFF_OACTIVE = 0x400 - IFF_POINTOPOINT = 0x10 - IFF_PPROMISC = 0x20000 - IFF_PROMISC = 0x100 - IFF_RENAMING = 0x400000 - IFF_RUNNING = 0x40 - IFF_SIMPLEX = 0x800 - IFF_STATICARP = 0x80000 - IFF_UP = 0x1 - IFNAMSIZ = 0x10 - IFT_BRIDGE = 0xd1 - IFT_CARP = 0xf8 - IFT_IEEE1394 = 0x90 - IFT_INFINIBAND = 0xc7 - IFT_L2VLAN = 0x87 - IFT_L3IPVLAN = 0x88 - IFT_PPP = 0x17 - IFT_PROPVIRTUAL = 0x35 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLASSD_HOST = 0xfffffff - IN_CLASSD_NET = 0xf0000000 - IN_CLASSD_NSHIFT = 0x1c - IN_LOOPBACKNET = 0x7f - IN_RFC3021_MASK = 0xfffffffe - IPPROTO_3PC = 0x22 - IPPROTO_ADFS = 0x44 - IPPROTO_AH = 0x33 - IPPROTO_AHIP = 0x3d - IPPROTO_APES = 0x63 - IPPROTO_ARGUS = 0xd - IPPROTO_AX25 = 0x5d - IPPROTO_BHA = 0x31 - IPPROTO_BLT = 0x1e - IPPROTO_BRSATMON = 0x4c - IPPROTO_CARP = 0x70 - IPPROTO_CFTP = 0x3e - IPPROTO_CHAOS = 0x10 - IPPROTO_CMTP = 0x26 - IPPROTO_CPHB = 0x49 - IPPROTO_CPNX = 0x48 - IPPROTO_DDP = 0x25 - IPPROTO_DGP = 0x56 - IPPROTO_DIVERT = 0x102 - IPPROTO_DONE = 0x101 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_EMCON = 0xe - IPPROTO_ENCAP = 0x62 - IPPROTO_EON = 0x50 - IPPROTO_ESP = 0x32 - IPPROTO_ETHERIP = 0x61 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GGP = 0x3 - IPPROTO_GMTP = 0x64 - IPPROTO_GRE = 0x2f - IPPROTO_HELLO = 0x3f - IPPROTO_HIP = 0x8b - IPPROTO_HMP = 0x14 - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IDPR = 0x23 - IPPROTO_IDRP = 0x2d - IPPROTO_IGMP = 0x2 - IPPROTO_IGP = 0x55 - IPPROTO_IGRP = 0x58 - IPPROTO_IL = 0x28 - IPPROTO_INLSP = 0x34 - IPPROTO_INP = 0x20 - IPPROTO_IP = 0x0 - IPPROTO_IPCOMP = 0x6c - IPPROTO_IPCV = 0x47 - IPPROTO_IPEIP = 0x5e - IPPROTO_IPIP = 0x4 - IPPROTO_IPPC = 0x43 - IPPROTO_IPV4 = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_IRTP = 0x1c - IPPROTO_KRYPTOLAN = 0x41 - IPPROTO_LARP = 0x5b - IPPROTO_LEAF1 = 0x19 - IPPROTO_LEAF2 = 0x1a - IPPROTO_MAX = 0x100 - IPPROTO_MEAS = 0x13 - IPPROTO_MH = 0x87 - IPPROTO_MHRP = 0x30 - IPPROTO_MICP = 0x5f - IPPROTO_MOBILE = 0x37 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_MUX = 0x12 - IPPROTO_ND = 0x4d - IPPROTO_NHRP = 0x36 - IPPROTO_NONE = 0x3b - IPPROTO_NSP = 0x1f - IPPROTO_NVPII = 0xb - IPPROTO_OLD_DIVERT = 0xfe - IPPROTO_OSPFIGP = 0x59 - IPPROTO_PFSYNC = 0xf0 - IPPROTO_PGM = 0x71 - IPPROTO_PIGP = 0x9 - IPPROTO_PIM = 0x67 - IPPROTO_PRM = 0x15 - IPPROTO_PUP = 0xc - IPPROTO_PVP = 0x4b - IPPROTO_RAW = 0xff - IPPROTO_RCCMON = 0xa - IPPROTO_RDP = 0x1b - IPPROTO_RESERVED_253 = 0xfd - IPPROTO_RESERVED_254 = 0xfe - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_RVD = 0x42 - IPPROTO_SATEXPAK = 0x40 - IPPROTO_SATMON = 0x45 - IPPROTO_SCCSP = 0x60 - IPPROTO_SCTP = 0x84 - IPPROTO_SDRP = 0x2a - IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 - IPPROTO_SHIM6 = 0x8c - IPPROTO_SKIP = 0x39 - IPPROTO_SPACER = 
0x7fff - IPPROTO_SRPC = 0x5a - IPPROTO_ST = 0x7 - IPPROTO_SVMTP = 0x52 - IPPROTO_SWIPE = 0x35 - IPPROTO_TCF = 0x57 - IPPROTO_TCP = 0x6 - IPPROTO_TLSP = 0x38 - IPPROTO_TP = 0x1d - IPPROTO_TPXX = 0x27 - IPPROTO_TRUNK1 = 0x17 - IPPROTO_TRUNK2 = 0x18 - IPPROTO_TTP = 0x54 - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPPROTO_VINES = 0x53 - IPPROTO_VISA = 0x46 - IPPROTO_VMTP = 0x51 - IPPROTO_WBEXPAK = 0x4f - IPPROTO_WBMON = 0x4e - IPPROTO_WSN = 0x4a - IPPROTO_XNET = 0xf - IPPROTO_XTP = 0x24 - IPV6_AUTOFLOWLABEL = 0x3b - IPV6_BINDANY = 0x40 - IPV6_BINDMULTI = 0x41 - IPV6_BINDV6ONLY = 0x1b - IPV6_CHECKSUM = 0x1a - IPV6_DEFAULT_MULTICAST_HOPS = 0x1 - IPV6_DEFAULT_MULTICAST_LOOP = 0x1 - IPV6_DEFHLIM = 0x40 - IPV6_DONTFRAG = 0x3e - IPV6_DSTOPTS = 0x32 - IPV6_FLOWID = 0x43 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 - IPV6_FLOWTYPE = 0x44 - IPV6_FRAGTTL = 0x78 - IPV6_FW_ADD = 0x1e - IPV6_FW_DEL = 0x1f - IPV6_FW_FLUSH = 0x20 - IPV6_FW_GET = 0x22 - IPV6_FW_ZERO = 0x21 - IPV6_HLIMDEC = 0x1 - IPV6_HOPLIMIT = 0x2f - IPV6_HOPOPTS = 0x31 - IPV6_IPSEC_POLICY = 0x1c - IPV6_JOIN_GROUP = 0xc - IPV6_LEAVE_GROUP = 0xd - IPV6_MAXHLIM = 0xff - IPV6_MAXOPTHDR = 0x800 - IPV6_MAXPACKET = 0xffff - IPV6_MAX_GROUP_SRC_FILTER = 0x200 - IPV6_MAX_MEMBERSHIPS = 0xfff - IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f - IPV6_MMTU = 0x500 - IPV6_MSFILTER = 0x4a - IPV6_MULTICAST_HOPS = 0xa - IPV6_MULTICAST_IF = 0x9 - IPV6_MULTICAST_LOOP = 0xb - IPV6_NEXTHOP = 0x30 - IPV6_PATHMTU = 0x2c - IPV6_PKTINFO = 0x2e - IPV6_PORTRANGE = 0xe - IPV6_PORTRANGE_DEFAULT = 0x0 - IPV6_PORTRANGE_HIGH = 0x1 - IPV6_PORTRANGE_LOW = 0x2 - IPV6_PREFER_TEMPADDR = 0x3f - IPV6_RECVDSTOPTS = 0x28 - IPV6_RECVFLOWID = 0x46 - IPV6_RECVHOPLIMIT = 0x25 - IPV6_RECVHOPOPTS = 0x27 - IPV6_RECVPATHMTU = 0x2b - IPV6_RECVPKTINFO = 0x24 - IPV6_RECVRSSBUCKETID = 0x47 - IPV6_RECVRTHDR = 0x26 - IPV6_RECVTCLASS = 0x39 - IPV6_RSSBUCKETID = 0x45 - IPV6_RSS_LISTEN_BUCKET = 0x42 - IPV6_RTHDR = 0x33 - IPV6_RTHDRDSTOPTS = 0x23 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_SOCKOPT_RESERVED1 = 0x3 - IPV6_TCLASS = 0x3d - IPV6_UNICAST_HOPS = 0x4 - IPV6_USE_MIN_MTU = 0x2a - IPV6_V6ONLY = 0x1b - IPV6_VERSION = 0x60 - IPV6_VERSION_MASK = 0xf0 - IP_ADD_MEMBERSHIP = 0xc - IP_ADD_SOURCE_MEMBERSHIP = 0x46 - IP_BINDANY = 0x18 - IP_BINDMULTI = 0x19 - IP_BLOCK_SOURCE = 0x48 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DONTFRAG = 0x43 - IP_DROP_MEMBERSHIP = 0xd - IP_DROP_SOURCE_MEMBERSHIP = 0x47 - IP_DUMMYNET3 = 0x31 - IP_DUMMYNET_CONFIGURE = 0x3c - IP_DUMMYNET_DEL = 0x3d - IP_DUMMYNET_FLUSH = 0x3e - IP_DUMMYNET_GET = 0x40 - IP_FLOWID = 0x5a - IP_FLOWTYPE = 0x5b - IP_FW3 = 0x30 - IP_FW_ADD = 0x32 - IP_FW_DEL = 0x33 - IP_FW_FLUSH = 0x34 - IP_FW_GET = 0x36 - IP_FW_NAT_CFG = 0x38 - IP_FW_NAT_DEL = 0x39 - IP_FW_NAT_GET_CONFIG = 0x3a - IP_FW_NAT_GET_LOG = 0x3b - IP_FW_RESETLOG = 0x37 - IP_FW_TABLE_ADD = 0x28 - IP_FW_TABLE_DEL = 0x29 - IP_FW_TABLE_FLUSH = 0x2a - IP_FW_TABLE_GETSIZE = 0x2b - IP_FW_TABLE_LIST = 0x2c - IP_FW_ZERO = 0x35 - IP_HDRINCL = 0x2 - IP_IPSEC_POLICY = 0x15 - IP_MAXPACKET = 0xffff - IP_MAX_GROUP_SRC_FILTER = 0x200 - IP_MAX_MEMBERSHIPS = 0xfff - IP_MAX_SOCK_MUTE_FILTER = 0x80 - IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MAX_SOURCE_FILTER = 0x400 - IP_MF = 0x2000 - IP_MINTTL = 0x42 - IP_MIN_MEMBERSHIPS = 0x1f - IP_MSFILTER = 0x4a - IP_MSS = 0x240 - IP_MULTICAST_IF = 0x9 - IP_MULTICAST_LOOP = 0xb - IP_MULTICAST_TTL = 0xa - IP_MULTICAST_VIF = 0xe - IP_OFFMASK = 0x1fff - 
IP_ONESBCAST = 0x17 - IP_OPTIONS = 0x1 - IP_PORTRANGE = 0x13 - IP_PORTRANGE_DEFAULT = 0x0 - IP_PORTRANGE_HIGH = 0x1 - IP_PORTRANGE_LOW = 0x2 - IP_RECVDSTADDR = 0x7 - IP_RECVFLOWID = 0x5d - IP_RECVIF = 0x14 - IP_RECVOPTS = 0x5 - IP_RECVRETOPTS = 0x6 - IP_RECVRSSBUCKETID = 0x5e - IP_RECVTOS = 0x44 - IP_RECVTTL = 0x41 - IP_RETOPTS = 0x8 - IP_RF = 0x8000 - IP_RSSBUCKETID = 0x5c - IP_RSS_LISTEN_BUCKET = 0x1a - IP_RSVP_OFF = 0x10 - IP_RSVP_ON = 0xf - IP_RSVP_VIF_OFF = 0x12 - IP_RSVP_VIF_ON = 0x11 - IP_SENDSRCADDR = 0x7 - IP_TOS = 0x3 - IP_TTL = 0x4 - IP_UNBLOCK_SOURCE = 0x49 - ISIG = 0x80 - ISTRIP = 0x20 - IXANY = 0x800 - IXOFF = 0x400 - IXON = 0x200 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - MADV_AUTOSYNC = 0x7 - MADV_CORE = 0x9 - MADV_DONTNEED = 0x4 - MADV_FREE = 0x5 - MADV_NOCORE = 0x8 - MADV_NORMAL = 0x0 - MADV_NOSYNC = 0x6 - MADV_PROTECT = 0xa - MADV_RANDOM = 0x1 - MADV_SEQUENTIAL = 0x2 - MADV_WILLNEED = 0x3 - MAP_ALIGNED_SUPER = 0x1000000 - MAP_ALIGNMENT_MASK = -0x1000000 - MAP_ALIGNMENT_SHIFT = 0x18 - MAP_ANON = 0x1000 - MAP_ANONYMOUS = 0x1000 - MAP_COPY = 0x2 - MAP_EXCL = 0x4000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_HASSEMAPHORE = 0x200 - MAP_NOCORE = 0x20000 - MAP_NOSYNC = 0x800 - MAP_PREFAULT_READ = 0x40000 - MAP_PRIVATE = 0x2 - MAP_RESERVED0020 = 0x20 - MAP_RESERVED0040 = 0x40 - MAP_RESERVED0080 = 0x80 - MAP_RESERVED0100 = 0x100 - MAP_SHARED = 0x1 - MAP_STACK = 0x400 - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MSG_CMSG_CLOEXEC = 0x40000 - MSG_COMPAT = 0x8000 - MSG_CTRUNC = 0x20 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x80 - MSG_EOF = 0x100 - MSG_EOR = 0x8 - MSG_NBIO = 0x4000 - MSG_NOSIGNAL = 0x20000 - MSG_NOTIFICATION = 0x2000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_TRUNC = 0x10 - MSG_WAITALL = 0x40 - MSG_WAITFORONE = 0x80000 - MS_ASYNC = 0x1 - MS_INVALIDATE = 0x2 - MS_SYNC = 0x0 - NAME_MAX = 0xff - NET_RT_DUMP = 0x1 - NET_RT_FLAGS = 0x2 - NET_RT_IFLIST = 0x3 - NET_RT_IFLISTL = 0x5 - NET_RT_IFMALIST = 0x4 - NOFLSH = 0x80000000 - NOKERNINFO = 0x2000000 - NOTE_ATTRIB = 0x8 - NOTE_CHILD = 0x4 - NOTE_CLOSE = 0x100 - NOTE_CLOSE_WRITE = 0x200 - NOTE_DELETE = 0x1 - NOTE_EXEC = 0x20000000 - NOTE_EXIT = 0x80000000 - NOTE_EXTEND = 0x4 - NOTE_FFAND = 0x40000000 - NOTE_FFCOPY = 0xc0000000 - NOTE_FFCTRLMASK = 0xc0000000 - NOTE_FFLAGSMASK = 0xffffff - NOTE_FFNOP = 0x0 - NOTE_FFOR = 0x80000000 - NOTE_FILE_POLL = 0x2 - NOTE_FORK = 0x40000000 - NOTE_LINK = 0x10 - NOTE_LOWAT = 0x1 - NOTE_MSECONDS = 0x2 - NOTE_NSECONDS = 0x8 - NOTE_OPEN = 0x80 - NOTE_PCTRLMASK = 0xf0000000 - NOTE_PDATAMASK = 0xfffff - NOTE_READ = 0x400 - NOTE_RENAME = 0x20 - NOTE_REVOKE = 0x40 - NOTE_SECONDS = 0x1 - NOTE_TRACK = 0x1 - NOTE_TRACKERR = 0x2 - NOTE_TRIGGER = 0x1000000 - NOTE_USECONDS = 0x4 - NOTE_WRITE = 0x2 - OCRNL = 0x10 - ONLCR = 0x2 - ONLRET = 0x40 - ONOCR = 0x20 - ONOEOT = 0x8 - OPOST = 0x1 - OXTABS = 0x4 - O_ACCMODE = 0x3 - O_APPEND = 0x8 - O_ASYNC = 0x40 - O_CLOEXEC = 0x100000 - O_CREAT = 0x200 - O_DIRECT = 0x10000 - O_DIRECTORY = 0x20000 - O_EXCL = 0x800 - O_EXEC = 0x40000 - O_EXLOCK = 0x20 - O_FSYNC = 0x80 - O_NDELAY = 0x4 - O_NOCTTY = 0x8000 - O_NOFOLLOW = 0x100 - O_NONBLOCK = 0x4 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_SHLOCK = 0x10 - O_SYNC = 0x80 - O_TRUNC = 0x400 - O_TTY_INIT = 0x80000 - O_VERIFY = 0x200000 - O_WRONLY = 0x1 - PARENB = 0x1000 - PARMRK = 0x8 - PARODD = 0x2000 - PENDIN = 0x20000000 - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROT_EXEC = 0x4 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - RLIMIT_AS = 0xa - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA 
= 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_MEMLOCK = 0x6 - RLIMIT_NOFILE = 0x8 - RLIMIT_NPROC = 0x7 - RLIMIT_RSS = 0x5 - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0x7fffffffffffffff - RTAX_AUTHOR = 0x6 - RTAX_BRD = 0x7 - RTAX_DST = 0x0 - RTAX_GATEWAY = 0x1 - RTAX_GENMASK = 0x3 - RTAX_IFA = 0x5 - RTAX_IFP = 0x4 - RTAX_MAX = 0x8 - RTAX_NETMASK = 0x2 - RTA_AUTHOR = 0x40 - RTA_BRD = 0x80 - RTA_DST = 0x1 - RTA_GATEWAY = 0x2 - RTA_GENMASK = 0x8 - RTA_IFA = 0x20 - RTA_IFP = 0x10 - RTA_NETMASK = 0x4 - RTF_BLACKHOLE = 0x1000 - RTF_BROADCAST = 0x400000 - RTF_DONE = 0x40 - RTF_DYNAMIC = 0x10 - RTF_FIXEDMTU = 0x80000 - RTF_FMASK = 0x1004d808 - RTF_GATEWAY = 0x2 - RTF_GWFLAG_COMPAT = 0x80000000 - RTF_HOST = 0x4 - RTF_LLDATA = 0x400 - RTF_LLINFO = 0x400 - RTF_LOCAL = 0x200000 - RTF_MODIFIED = 0x20 - RTF_MULTICAST = 0x800000 - RTF_PINNED = 0x100000 - RTF_PROTO1 = 0x8000 - RTF_PROTO2 = 0x4000 - RTF_PROTO3 = 0x40000 - RTF_REJECT = 0x8 - RTF_RNH_LOCKED = 0x40000000 - RTF_STATIC = 0x800 - RTF_STICKY = 0x10000000 - RTF_UP = 0x1 - RTF_XRESOLVE = 0x200 - RTM_ADD = 0x1 - RTM_CHANGE = 0x3 - RTM_DELADDR = 0xd - RTM_DELETE = 0x2 - RTM_DELMADDR = 0x10 - RTM_GET = 0x4 - RTM_IEEE80211 = 0x12 - RTM_IFANNOUNCE = 0x11 - RTM_IFINFO = 0xe - RTM_LOCK = 0x8 - RTM_LOSING = 0x5 - RTM_MISS = 0x7 - RTM_NEWADDR = 0xc - RTM_NEWMADDR = 0xf - RTM_REDIRECT = 0x6 - RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 - RTM_VERSION = 0x5 - RTV_EXPIRE = 0x4 - RTV_HOPCOUNT = 0x2 - RTV_MTU = 0x1 - RTV_RPIPE = 0x8 - RTV_RTT = 0x40 - RTV_RTTVAR = 0x80 - RTV_SPIPE = 0x10 - RTV_SSTHRESH = 0x20 - RTV_WEIGHT = 0x100 - RT_ALL_FIBS = -0x1 - RT_BLACKHOLE = 0x40 - RT_CACHING_CONTEXT = 0x1 - RT_DEFAULT_FIB = 0x0 - RT_HAS_GW = 0x80 - RT_HAS_HEADER = 0x10 - RT_HAS_HEADER_BIT = 0x4 - RT_L2_ME = 0x4 - RT_L2_ME_BIT = 0x2 - RT_LLE_CACHE = 0x100 - RT_MAY_LOOP = 0x8 - RT_MAY_LOOP_BIT = 0x3 - RT_NORTREF = 0x2 - RT_REJECT = 0x20 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_BINTIME = 0x4 - SCM_CREDS = 0x3 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x2 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDMULTI = 0x80206931 - SIOCAIFADDR = 0x8040691a - SIOCAIFGROUP = 0x80246987 - SIOCATMARK = 0x40047307 - SIOCDELMULTI = 0x80206932 - SIOCDIFADDR = 0x80206919 - SIOCDIFGROUP = 0x80246989 - SIOCDIFPHYADDR = 0x80206949 - SIOCGDRVSPEC = 0xc01c697b - SIOCGETSGCNT = 0xc0147210 - SIOCGETVIFCNT = 0xc014720f - SIOCGHIWAT = 0x40047301 - SIOCGI2C = 0xc020693d - SIOCGIFADDR = 0xc0206921 - SIOCGIFBRDADDR = 0xc0206923 - SIOCGIFCAP = 0xc020691f - SIOCGIFCONF = 0xc0086924 - SIOCGIFDESCR = 0xc020692a - SIOCGIFDSTADDR = 0xc0206922 - SIOCGIFFIB = 0xc020695c - SIOCGIFFLAGS = 0xc0206911 - SIOCGIFGENERIC = 0xc020693a - SIOCGIFGMEMB = 0xc024698a - SIOCGIFGROUP = 0xc0246988 - SIOCGIFINDEX = 0xc0206920 - SIOCGIFMAC = 0xc0206926 - SIOCGIFMEDIA = 0xc0286938 - SIOCGIFMETRIC = 0xc0206917 - SIOCGIFMTU = 0xc0206933 - SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206948 - SIOCGIFPHYS = 0xc0206935 - SIOCGIFPSRCADDR = 0xc0206947 - SIOCGIFSTATUS = 0xc331693b - SIOCGIFXMEDIA = 0xc028698b - SIOCGLOWAT = 0x40047303 - SIOCGPGRP = 0x40047309 - SIOCGPRIVATE_0 = 0xc0206950 - SIOCGPRIVATE_1 = 0xc0206951 - SIOCGTUNFIB = 0xc020695e - SIOCIFCREATE = 0xc020697a - SIOCIFCREATE2 = 0xc020697c - SIOCIFDESTROY = 0x80206979 - SIOCIFGCLONERS = 0xc00c6978 - SIOCSDRVSPEC = 0x801c697b - SIOCSHIWAT = 0x80047300 - SIOCSIFADDR = 0x8020690c - SIOCSIFBRDADDR = 0x80206913 - SIOCSIFCAP = 0x8020691e - SIOCSIFDESCR = 0x80206929 - SIOCSIFDSTADDR = 0x8020690e - SIOCSIFFIB = 0x8020695d - SIOCSIFFLAGS = 0x80206910 - 
SIOCSIFGENERIC = 0x80206939 - SIOCSIFLLADDR = 0x8020693c - SIOCSIFMAC = 0x80206927 - SIOCSIFMEDIA = 0xc0206937 - SIOCSIFMETRIC = 0x80206918 - SIOCSIFMTU = 0x80206934 - SIOCSIFNAME = 0x80206928 - SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x80406946 - SIOCSIFPHYS = 0x80206936 - SIOCSIFRVNET = 0xc020695b - SIOCSIFVNET = 0xc020695a - SIOCSLOWAT = 0x80047302 - SIOCSPGRP = 0x80047308 - SIOCSTUNFIB = 0x8020695f - SOCK_CLOEXEC = 0x10000000 - SOCK_DGRAM = 0x2 - SOCK_MAXADDRLEN = 0xff - SOCK_NONBLOCK = 0x20000000 - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_SOCKET = 0xffff - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x2 - SO_ACCEPTFILTER = 0x1000 - SO_BINTIME = 0x2000 - SO_BROADCAST = 0x20 - SO_DEBUG = 0x1 - SO_DONTROUTE = 0x10 - SO_ERROR = 0x1007 - SO_KEEPALIVE = 0x8 - SO_LABEL = 0x1009 - SO_LINGER = 0x80 - SO_LISTENINCQLEN = 0x1013 - SO_LISTENQLEN = 0x1012 - SO_LISTENQLIMIT = 0x1011 - SO_NOSIGPIPE = 0x800 - SO_NO_DDP = 0x8000 - SO_NO_OFFLOAD = 0x4000 - SO_OOBINLINE = 0x100 - SO_PEERLABEL = 0x1010 - SO_PROTOCOL = 0x1016 - SO_PROTOTYPE = 0x1016 - SO_RCVBUF = 0x1002 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_SETFIB = 0x1014 - SO_SNDBUF = 0x1001 - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_TIMESTAMP = 0x400 - SO_TYPE = 0x1008 - SO_USELOOPBACK = 0x40 - SO_USER_COOKIE = 0x1015 - SO_VENDOR = 0x80000000 - TAB0 = 0x0 - TAB3 = 0x4 - TABDLY = 0x4 - TCIFLUSH = 0x1 - TCIOFF = 0x3 - TCIOFLUSH = 0x3 - TCION = 0x4 - TCOFLUSH = 0x2 - TCOOFF = 0x1 - TCOON = 0x2 - TCP_CA_NAME_MAX = 0x10 - TCP_CCALGOOPT = 0x41 - TCP_CONGESTION = 0x40 - TCP_FASTOPEN = 0x401 - TCP_FUNCTION_BLK = 0x2000 - TCP_FUNCTION_NAME_LEN_MAX = 0x20 - TCP_INFO = 0x20 - TCP_KEEPCNT = 0x400 - TCP_KEEPIDLE = 0x100 - TCP_KEEPINIT = 0x80 - TCP_KEEPINTVL = 0x200 - TCP_MAXBURST = 0x4 - TCP_MAXHLEN = 0x3c - TCP_MAXOLEN = 0x28 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_SACK = 0x4 - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0x10 - TCP_MINMSS = 0xd8 - TCP_MSS = 0x218 - TCP_NODELAY = 0x1 - TCP_NOOPT = 0x8 - TCP_NOPUSH = 0x4 - TCP_PCAP_IN = 0x1000 - TCP_PCAP_OUT = 0x800 - TCP_VENDOR = 0x80000000 - TCSAFLUSH = 0x2 - TIOCCBRK = 0x2000747a - TIOCCDTR = 0x20007478 - TIOCCONS = 0x80047462 - TIOCDRAIN = 0x2000745e - TIOCEXCL = 0x2000740d - TIOCEXT = 0x80047460 - TIOCFLUSH = 0x80047410 - TIOCGDRAINWAIT = 0x40047456 - TIOCGETA = 0x402c7413 - TIOCGETD = 0x4004741a - TIOCGPGRP = 0x40047477 - TIOCGPTN = 0x4004740f - TIOCGSID = 0x40047463 - TIOCGWINSZ = 0x40087468 - TIOCMBIC = 0x8004746b - TIOCMBIS = 0x8004746c - TIOCMGDTRWAIT = 0x4004745a - TIOCMGET = 0x4004746a - TIOCMSDTRWAIT = 0x8004745b - TIOCMSET = 0x8004746d - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DCD = 0x40 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x20007471 - TIOCNXCL = 0x2000740e - TIOCOUTQ = 0x40047473 - TIOCPKT = 0x80047470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCPTMASTER = 0x2000741c - TIOCSBRK = 0x2000747b - TIOCSCTTY = 0x20007461 - TIOCSDRAINWAIT = 0x80047457 - TIOCSDTR = 0x20007479 - TIOCSETA = 0x802c7414 - TIOCSETAF = 0x802c7416 - TIOCSETAW = 0x802c7415 - TIOCSETD = 0x8004741b - TIOCSIG = 0x2004745f - TIOCSPGRP = 0x80047476 - TIOCSTART = 0x2000746e - TIOCSTAT = 0x20007465 - TIOCSTI = 0x80017472 - TIOCSTOP = 0x2000746f - 
TIOCSWINSZ = 0x80087467 - TIOCTIMESTAMP = 0x40087459 - TIOCUCNTL = 0x80047466 - TOSTOP = 0x400000 - VDISCARD = 0xf - VDSUSP = 0xb - VEOF = 0x0 - VEOL = 0x1 - VEOL2 = 0x2 - VERASE = 0x3 - VERASE2 = 0x7 - VINTR = 0x8 - VKILL = 0x5 - VLNEXT = 0xe - VMIN = 0x10 - VQUIT = 0x9 - VREPRINT = 0x6 - VSTART = 0xc - VSTATUS = 0x12 - VSTOP = 0xd - VSUSP = 0xa - VTIME = 0x11 - VWERASE = 0x4 - WCONTINUED = 0x4 - WCOREFLAG = 0x80 - WEXITED = 0x10 - WLINUXCLONE = 0x80000000 - WNOHANG = 0x1 - WNOWAIT = 0x8 - WSTOPPED = 0x2 - WTRAPPED = 0x20 - WUNTRACED = 0x2 + AF_APPLETALK = 0x10 + AF_ARP = 0x23 + AF_ATM = 0x1e + AF_BLUETOOTH = 0x24 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1c + AF_INET6_SDP = 0x2a + AF_INET_SDP = 0x28 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x2a + AF_NATM = 0x1d + AF_NETBIOS = 0x6 + AF_NETGRAPH = 0x20 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SCLUSTER = 0x22 + AF_SIP = 0x18 + AF_SLOW = 0x21 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VENDOR00 = 0x27 + AF_VENDOR01 = 0x29 + AF_VENDOR02 = 0x2b + AF_VENDOR03 = 0x2d + AF_VENDOR04 = 0x2f + AF_VENDOR05 = 0x31 + AF_VENDOR06 = 0x33 + AF_VENDOR07 = 0x35 + AF_VENDOR08 = 0x37 + AF_VENDOR09 = 0x39 + AF_VENDOR10 = 0x3b + AF_VENDOR11 = 0x3d + AF_VENDOR12 = 0x3f + AF_VENDOR13 = 0x41 + AF_VENDOR14 = 0x43 + AF_VENDOR15 = 0x45 + AF_VENDOR16 = 0x47 + AF_VENDOR17 = 0x49 + AF_VENDOR18 = 0x4b + AF_VENDOR19 = 0x4d + AF_VENDOR20 = 0x4f + AF_VENDOR21 = 0x51 + AF_VENDOR22 = 0x53 + AF_VENDOR23 = 0x55 + AF_VENDOR24 = 0x57 + AF_VENDOR25 = 0x59 + AF_VENDOR26 = 0x5b + AF_VENDOR27 = 0x5d + AF_VENDOR28 = 0x5f + AF_VENDOR29 = 0x61 + AF_VENDOR30 = 0x63 + AF_VENDOR31 = 0x65 + AF_VENDOR32 = 0x67 + AF_VENDOR33 = 0x69 + AF_VENDOR34 = 0x6b + AF_VENDOR35 = 0x6d + AF_VENDOR36 = 0x6f + AF_VENDOR37 = 0x71 + AF_VENDOR38 = 0x73 + AF_VENDOR39 = 0x75 + AF_VENDOR40 = 0x77 + AF_VENDOR41 = 0x79 + AF_VENDOR42 = 0x7b + AF_VENDOR43 = 0x7d + AF_VENDOR44 = 0x7f + AF_VENDOR45 = 0x81 + AF_VENDOR46 = 0x83 + AF_VENDOR47 = 0x85 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427c + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRECTION = 0x40044276 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0084279 + BIOCGETBUFMODE = 0x4004427d + BIOCGETIF = 0x4020426b + BIOCGETZMAX = 0x4004427f + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4008426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCGTSTAMP = 0x40044283 + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x2000427a + BIOCPROMISC = 0x20004269 + BIOCROTZBUF = 0x400c4280 + BIOCSBLEN = 0xc0044266 + BIOCSDIRECTION = 0x80044277 + BIOCSDLT = 0x80044278 + BIOCSETBUFMODE = 0x8004427e + BIOCSETF = 0x80084267 + BIOCSETFNR = 0x80084282 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x8008427b + BIOCSETZBUF = 0x800c4281 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8008426d + BIOCSSEESENT = 0x80044277 + BIOCSTSTAMP = 0x80044284 + BIOCVERSION = 
0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_BUFMODE_BUFFER = 0x1 + BPF_BUFMODE_ZBUF = 0x2 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_T_BINTIME = 0x2 + BPF_T_BINTIME_FAST = 0x102 + BPF_T_BINTIME_MONOTONIC = 0x202 + BPF_T_BINTIME_MONOTONIC_FAST = 0x302 + BPF_T_FAST = 0x100 + BPF_T_FLAG_MASK = 0x300 + BPF_T_FORMAT_MASK = 0x3 + BPF_T_MICROTIME = 0x0 + BPF_T_MICROTIME_FAST = 0x100 + BPF_T_MICROTIME_MONOTONIC = 0x200 + BPF_T_MICROTIME_MONOTONIC_FAST = 0x300 + BPF_T_MONOTONIC = 0x200 + BPF_T_MONOTONIC_FAST = 0x300 + BPF_T_NANOTIME = 0x1 + BPF_T_NANOTIME_FAST = 0x101 + BPF_T_NANOTIME_MONOTONIC = 0x201 + BPF_T_NANOTIME_MONOTONIC_FAST = 0x301 + BPF_T_NONE = 0x3 + BPF_T_NORMAL = 0x0 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_MONOTONIC = 0x4 + CLOCK_MONOTONIC_FAST = 0xc + CLOCK_MONOTONIC_PRECISE = 0xb + CLOCK_PROCESS_CPUTIME_ID = 0xf + CLOCK_PROF = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_FAST = 0xa + CLOCK_REALTIME_PRECISE = 0x9 + CLOCK_SECOND = 0xd + CLOCK_THREAD_CPUTIME_ID = 0xe + CLOCK_UPTIME = 0x5 + CLOCK_UPTIME_FAST = 0x8 + CLOCK_UPTIME_PRECISE = 0x7 + CLOCK_VIRTUAL = 0x1 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_MAXNAME = 0x18 + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + 
DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0xf6 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x79 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_FS = -0x9 + EVFILT_LIO = -0xa + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xb + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xb + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DROP = 0x1000 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTATTR_NAMESPACE_EMPTY = 0x0 + EXTATTR_NAMESPACE_SYSTEM = 0x2 + EXTATTR_NAMESPACE_USER = 0x1 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_CANCEL = 0x5 + F_DUP2FD = 0xa + F_DUP2FD_CLOEXEC = 0x12 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x11 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0xb + F_GETOWN = 0x5 + F_OGETLK = 0x7 + F_OK = 0x0 + F_OSETLK = 0x8 + F_OSETLKW = 0x9 + F_RDAHEAD = 0x10 + F_RDLCK = 0x1 + F_READAHEAD = 0xf + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0xc + F_SETLKW = 0xd + F_SETLK_REMOTE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_UNLCKSYS = 
0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x218f72 + IFF_CANTCONFIG = 0x10000 + IFF_DEBUG = 0x4 + IFF_DRV_OACTIVE = 0x400 + IFF_DRV_RUNNING = 0x40 + IFF_DYING = 0x200000 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MONITOR = 0x40000 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PPROMISC = 0x20000 + IFF_PROMISC = 0x100 + IFF_RENAMING = 0x400000 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_SMART = 0x20 + IFF_STATICARP = 0x80000 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf8 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf2 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_IPXIP = 0xf9 + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + 
IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf6 + IFT_PFSYNC = 0xf7 + IFT_PLC = 0xae + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf1 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_STF = 0xd7 + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VOICEEM = 0x64 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_MASK = 0xfffffffe + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CARP = 0x70 + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HIP = 0x8b + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 
= 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MH = 0x87 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OLD_DIVERT = 0xfe + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_RESERVED_253 = 0xfd + IPPROTO_RESERVED_254 = 0xfe + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEND = 0x103 + IPPROTO_SEP = 0x21 + IPPROTO_SHIM6 = 0x8c + IPPROTO_SKIP = 0x39 + IPPROTO_SPACER = 0x7fff + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TLSP = 0x38 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDANY = 0x40 + IPV6_BINDV6ONLY = 0x1b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MIN_MEMBERSHIPS = 0x1f + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BINDANY = 0x18 + IP_BLOCK_SOURCE = 0x48 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DONTFRAG = 0x43 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET3 = 0x31 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW3 = 0x30 + IP_FW_ADD = 0x32 + IP_FW_DEL = 0x33 + IP_FW_FLUSH = 0x34 + 
IP_FW_GET = 0x36 + IP_FW_NAT_CFG = 0x38 + IP_FW_NAT_DEL = 0x39 + IP_FW_NAT_GET_CONFIG = 0x3a + IP_FW_NAT_GET_LOG = 0x3b + IP_FW_RESETLOG = 0x37 + IP_FW_TABLE_ADD = 0x28 + IP_FW_TABLE_DEL = 0x29 + IP_FW_TABLE_FLUSH = 0x2a + IP_FW_TABLE_GETSIZE = 0x2b + IP_FW_TABLE_LIST = 0x2c + IP_FW_ZERO = 0x35 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MAX_SOURCE_FILTER = 0x400 + IP_MF = 0x2000 + IP_MINTTL = 0x42 + IP_MIN_MEMBERSHIPS = 0x1f + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_OFFMASK = 0x1fff + IP_ONESBCAST = 0x17 + IP_OPTIONS = 0x1 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVTOS = 0x44 + IP_RECVTTL = 0x41 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_AUTOSYNC = 0x7 + MADV_CORE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_NOCORE = 0x8 + MADV_NORMAL = 0x0 + MADV_NOSYNC = 0x6 + MADV_PROTECT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MAP_ALIGNED_SUPER = 0x1000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_EXCL = 0x4000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_NOCORE = 0x20000 + MAP_NORESERVE = 0x40 + MAP_NOSYNC = 0x800 + MAP_PREFAULT_READ = 0x40000 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_RESERVED0080 = 0x80 + MAP_RESERVED0100 = 0x100 + MAP_SHARED = 0x1 + MAP_STACK = 0x400 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_CMSG_CLOEXEC = 0x40000 + MSG_COMPAT = 0x8000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_NBIO = 0x4000 + MSG_NOSIGNAL = 0x20000 + MSG_NOTIFICATION = 0x2000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x0 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFLISTL = 0x5 + NET_RT_IFMALIST = 0x4 + NET_RT_MAXID = 0x6 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x100000 + O_CREAT = 0x200 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x20000 + O_EXCL = 0x800 + O_EXEC = 0x40000 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 
0x2 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_TTY_INIT = 0x80000 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x1004d808 + RTF_GATEWAY = 0x2 + RTF_GWFLAG_COMPAT = 0x80000000 + RTF_HOST = 0x4 + RTF_LLDATA = 0x400 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_RNH_LOCKED = 0x40000000 + RTF_STATIC = 0x800 + RTF_STICKY = 0x10000000 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x12 + RTM_IFANNOUNCE = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RTV_WEIGHT = 0x100 + RT_ALL_FIBS = -0x1 + RT_CACHING_CONTEXT = 0x1 + RT_DEFAULT_FIB = 0x0 + RT_NORTREF = 0x2 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_BINTIME = 0x4 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCADDRT = 0x8030720a + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80246987 + SIOCALIFADDR = 0x8118691b + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80206932 + SIOCDELRT = 0x8030720b + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80246989 + SIOCDIFPHYADDR = 0x80206949 + SIOCDLIFADDR = 0x8118691d + SIOCGDRVSPEC = 0xc01c697b + SIOCGETSGCNT = 0xc0147210 + SIOCGETVIFCNT = 0xc014720f + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020691f + SIOCGIFCONF = 0xc0086924 + SIOCGIFDESCR = 0xc020692a + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFIB = 0xc020695c + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc024698a + SIOCGIFGROUP = 0xc0246988 + SIOCGIFINDEX = 0xc0206920 + SIOCGIFMAC = 0xc0206926 + SIOCGIFMEDIA = 0xc0286938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFSTATUS = 0xc331693b + SIOCGLIFADDR = 0xc118691c + SIOCGLIFPHYADDR = 0xc118694b + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGPRIVATE_0 = 0xc0206950 + SIOCGPRIVATE_1 = 0xc0206951 + SIOCIFCREATE = 0xc020697a + SIOCIFCREATE2 = 0xc020697c + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc00c6978 + SIOCSDRVSPEC = 0x801c697b + 
SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020691e + SIOCSIFDESCR = 0x80206929 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFIB = 0x8020695d + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206927 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNAME = 0x80206928 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPHYS = 0x80206936 + SIOCSIFRVNET = 0xc020695b + SIOCSIFVNET = 0xc020695a + SIOCSLIFPHYADDR = 0x8118694a + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_NONBLOCK = 0x20000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BINTIME = 0x2000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1009 + SO_LINGER = 0x80 + SO_LISTENINCQLEN = 0x1013 + SO_LISTENQLEN = 0x1012 + SO_LISTENQLIMIT = 0x1011 + SO_NOSIGPIPE = 0x800 + SO_NO_DDP = 0x8000 + SO_NO_OFFLOAD = 0x4000 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1010 + SO_PROTOCOL = 0x1016 + SO_PROTOTYPE = 0x1016 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SETFIB = 0x1014 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_USER_COOKIE = 0x1015 + SO_VENDOR = 0x80000000 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_CA_NAME_MAX = 0x10 + TCP_CONGESTION = 0x40 + TCP_INFO = 0x20 + TCP_KEEPCNT = 0x400 + TCP_KEEPIDLE = 0x100 + TCP_KEEPINIT = 0x80 + TCP_KEEPINTVL = 0x200 + TCP_MAXBURST = 0x4 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_VENDOR = 0x80000000 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGPTN = 0x4004740f + TIOCGSID = 0x40047463 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DCD = 0x40 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMASTER = 0x2000741c + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + 
TIOCTIMESTAMP = 0x40087459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VERASE2 = 0x7 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x4 + WCOREFLAG = 0x80 + WEXITED = 0x10 + WLINUXCLONE = 0x80000000 + WNOHANG = 0x1 + WNOWAIT = 0x8 + WSTOPPED = 0x2 + WTRAPPED = 0x20 + WUNTRACED = 0x2 ) // Errors
diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
index ac094f9cf35..e48e7799a1d 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
@@ -1,5 +1,5 @@ // mkerrors.sh -m64 -// Code generated by the command above; see README.md. DO NOT EDIT. +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build amd64,freebsd
@@ -11,1420 +11,1461 @@ package unix import "syscall" const ( - AF_APPLETALK = 0x10 - AF_ARP = 0x23 - AF_ATM = 0x1e - AF_BLUETOOTH = 0x24 - AF_CCITT = 0xa - AF_CHAOS = 0x5 - AF_CNT = 0x15 - AF_COIP = 0x14 - AF_DATAKIT = 0x9 - AF_DECnet = 0xc - AF_DLI = 0xd - AF_E164 = 0x1a - AF_ECMA = 0x8 - AF_HYLINK = 0xf - AF_IEEE80211 = 0x25 - AF_IMPLINK = 0x3 - AF_INET = 0x2 - AF_INET6 = 0x1c - AF_INET6_SDP = 0x2a - AF_INET_SDP = 0x28 - AF_IPX = 0x17 - AF_ISDN = 0x1a - AF_ISO = 0x7 - AF_LAT = 0xe - AF_LINK = 0x12 - AF_LOCAL = 0x1 - AF_MAX = 0x2a - AF_NATM = 0x1d - AF_NETBIOS = 0x6 - AF_NETGRAPH = 0x20 - AF_OSI = 0x7 - AF_PUP = 0x4 - AF_ROUTE = 0x11 - AF_SCLUSTER = 0x22 - AF_SIP = 0x18 - AF_SLOW = 0x21 - AF_SNA = 0xb - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VENDOR00 = 0x27 - AF_VENDOR01 = 0x29 - AF_VENDOR02 = 0x2b - AF_VENDOR03 = 0x2d - AF_VENDOR04 = 0x2f - AF_VENDOR05 = 0x31 - AF_VENDOR06 = 0x33 - AF_VENDOR07 = 0x35 - AF_VENDOR08 = 0x37 - AF_VENDOR09 = 0x39 - AF_VENDOR10 = 0x3b - AF_VENDOR11 = 0x3d - AF_VENDOR12 = 0x3f - AF_VENDOR13 = 0x41 - AF_VENDOR14 = 0x43 - AF_VENDOR15 = 0x45 - AF_VENDOR16 = 0x47 - AF_VENDOR17 = 0x49 - AF_VENDOR18 = 0x4b - AF_VENDOR19 = 0x4d - AF_VENDOR20 = 0x4f - AF_VENDOR21 = 0x51 - AF_VENDOR22 = 0x53 - AF_VENDOR23 = 0x55 - AF_VENDOR24 = 0x57 - AF_VENDOR25 = 0x59 - AF_VENDOR26 = 0x5b - AF_VENDOR27 = 0x5d - AF_VENDOR28 = 0x5f - AF_VENDOR29 = 0x61 - AF_VENDOR30 = 0x63 - AF_VENDOR31 = 0x65 - AF_VENDOR32 = 0x67 - AF_VENDOR33 = 0x69 - AF_VENDOR34 = 0x6b - AF_VENDOR35 = 0x6d - AF_VENDOR36 = 0x6f - AF_VENDOR37 = 0x71 - AF_VENDOR38 = 0x73 - AF_VENDOR39 = 0x75 - AF_VENDOR40 = 0x77 - AF_VENDOR41 = 0x79 - AF_VENDOR42 = 0x7b - AF_VENDOR43 = 0x7d - AF_VENDOR44 = 0x7f - AF_VENDOR45 = 0x81 - AF_VENDOR46 = 0x83 - AF_VENDOR47 = 0x85 - ALTWERASE = 0x200 - B0 = 0x0 - B110 = 0x6e - B115200 = 0x1c200 - B1200 = 0x4b0 - B134 = 0x86 - B14400 = 0x3840 - B150 = 0x96 - B1800 = 0x708 - B19200 = 0x4b00 - B200 = 0xc8 - B230400 = 0x38400 - B2400 = 0x960 - B28800 = 0x7080 - B300 = 0x12c - B38400 = 0x9600 - B460800 = 0x70800 - B4800 = 0x12c0 - B50 = 0x32 - B57600 = 0xe100 - B600 = 0x258 - B7200 = 0x1c20 - B75 = 0x4b - B76800 = 0x12c00 - B921600 = 0xe1000 - B9600 = 0x2580 - BIOCFEEDBACK = 0x8004427c - BIOCFLUSH = 0x20004268 - BIOCGBLEN = 0x40044266 - BIOCGDIRECTION = 0x40044276 - BIOCGDLT = 0x4004426a - BIOCGDLTLIST = 0xc0104279 - BIOCGETBUFMODE = 0x4004427d - BIOCGETIF = 0x4020426b - BIOCGETZMAX = 0x4008427f - BIOCGHDRCMPLT = 0x40044274 - BIOCGRSIG = 0x40044272 - BIOCGRTIMEOUT = 0x4010426e - BIOCGSEESENT = 0x40044276 - BIOCGSTATS =
0x4008426f - BIOCGTSTAMP = 0x40044283 - BIOCIMMEDIATE = 0x80044270 - BIOCLOCK = 0x2000427a - BIOCPROMISC = 0x20004269 - BIOCROTZBUF = 0x40184280 - BIOCSBLEN = 0xc0044266 - BIOCSDIRECTION = 0x80044277 - BIOCSDLT = 0x80044278 - BIOCSETBUFMODE = 0x8004427e - BIOCSETF = 0x80104267 - BIOCSETFNR = 0x80104282 - BIOCSETIF = 0x8020426c - BIOCSETWF = 0x8010427b - BIOCSETZBUF = 0x80184281 - BIOCSHDRCMPLT = 0x80044275 - BIOCSRSIG = 0x80044273 - BIOCSRTIMEOUT = 0x8010426d - BIOCSSEESENT = 0x80044277 - BIOCSTSTAMP = 0x80044284 - BIOCVERSION = 0x40044271 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ALIGNMENT = 0x8 - BPF_ALU = 0x4 - BPF_AND = 0x50 - BPF_B = 0x10 - BPF_BUFMODE_BUFFER = 0x1 - BPF_BUFMODE_ZBUF = 0x2 - BPF_DIV = 0x30 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JMP = 0x5 - BPF_JSET = 0x40 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXBUFSIZE = 0x80000 - BPF_MAXINSNS = 0x200 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINBUFSIZE = 0x20 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_OR = 0x40 - BPF_RELEASE = 0x30bb6 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAX = 0x0 - BPF_TXA = 0x80 - BPF_T_BINTIME = 0x2 - BPF_T_BINTIME_FAST = 0x102 - BPF_T_BINTIME_MONOTONIC = 0x202 - BPF_T_BINTIME_MONOTONIC_FAST = 0x302 - BPF_T_FAST = 0x100 - BPF_T_FLAG_MASK = 0x300 - BPF_T_FORMAT_MASK = 0x3 - BPF_T_MICROTIME = 0x0 - BPF_T_MICROTIME_FAST = 0x100 - BPF_T_MICROTIME_MONOTONIC = 0x200 - BPF_T_MICROTIME_MONOTONIC_FAST = 0x300 - BPF_T_MONOTONIC = 0x200 - BPF_T_MONOTONIC_FAST = 0x300 - BPF_T_NANOTIME = 0x1 - BPF_T_NANOTIME_FAST = 0x101 - BPF_T_NANOTIME_MONOTONIC = 0x201 - BPF_T_NANOTIME_MONOTONIC_FAST = 0x301 - BPF_T_NONE = 0x3 - BPF_T_NORMAL = 0x0 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XOR = 0xa0 - BRKINT = 0x2 - CAP_ACCEPT = 0x200000020000000 - CAP_ACL_CHECK = 0x400000000010000 - CAP_ACL_DELETE = 0x400000000020000 - CAP_ACL_GET = 0x400000000040000 - CAP_ACL_SET = 0x400000000080000 - CAP_ALL0 = 0x20007ffffffffff - CAP_ALL1 = 0x4000000001fffff - CAP_BIND = 0x200000040000000 - CAP_BINDAT = 0x200008000000400 - CAP_CHFLAGSAT = 0x200000000001400 - CAP_CONNECT = 0x200000080000000 - CAP_CONNECTAT = 0x200010000000400 - CAP_CREATE = 0x200000000000040 - CAP_EVENT = 0x400000000000020 - CAP_EXTATTR_DELETE = 0x400000000001000 - CAP_EXTATTR_GET = 0x400000000002000 - CAP_EXTATTR_LIST = 0x400000000004000 - CAP_EXTATTR_SET = 0x400000000008000 - CAP_FCHDIR = 0x200000000000800 - CAP_FCHFLAGS = 0x200000000001000 - CAP_FCHMOD = 0x200000000002000 - CAP_FCHMODAT = 0x200000000002400 - CAP_FCHOWN = 0x200000000004000 - CAP_FCHOWNAT = 0x200000000004400 - CAP_FCNTL = 0x200000000008000 - CAP_FCNTL_ALL = 0x78 - CAP_FCNTL_GETFL = 0x8 - CAP_FCNTL_GETOWN = 0x20 - CAP_FCNTL_SETFL = 0x10 - CAP_FCNTL_SETOWN = 0x40 - CAP_FEXECVE = 0x200000000000080 - CAP_FLOCK = 0x200000000010000 - CAP_FPATHCONF = 0x200000000020000 - CAP_FSCK = 0x200000000040000 - CAP_FSTAT = 0x200000000080000 - CAP_FSTATAT = 0x200000000080400 - CAP_FSTATFS = 0x200000000100000 - CAP_FSYNC = 0x200000000000100 - CAP_FTRUNCATE = 0x200000000000200 - CAP_FUTIMES = 0x200000000200000 - CAP_FUTIMESAT = 0x200000000200400 - CAP_GETPEERNAME = 0x200000100000000 - CAP_GETSOCKNAME = 0x200000200000000 - CAP_GETSOCKOPT = 0x200000400000000 - CAP_IOCTL = 0x400000000000080 - CAP_IOCTLS_ALL = 0x7fffffffffffffff - CAP_KQUEUE = 0x400000000100040 - 
CAP_KQUEUE_CHANGE = 0x400000000100000 - CAP_KQUEUE_EVENT = 0x400000000000040 - CAP_LINKAT_SOURCE = 0x200020000000400 - CAP_LINKAT_TARGET = 0x200000000400400 - CAP_LISTEN = 0x200000800000000 - CAP_LOOKUP = 0x200000000000400 - CAP_MAC_GET = 0x400000000000001 - CAP_MAC_SET = 0x400000000000002 - CAP_MKDIRAT = 0x200000000800400 - CAP_MKFIFOAT = 0x200000001000400 - CAP_MKNODAT = 0x200000002000400 - CAP_MMAP = 0x200000000000010 - CAP_MMAP_R = 0x20000000000001d - CAP_MMAP_RW = 0x20000000000001f - CAP_MMAP_RWX = 0x20000000000003f - CAP_MMAP_RX = 0x20000000000003d - CAP_MMAP_W = 0x20000000000001e - CAP_MMAP_WX = 0x20000000000003e - CAP_MMAP_X = 0x20000000000003c - CAP_PDGETPID = 0x400000000000200 - CAP_PDKILL = 0x400000000000800 - CAP_PDWAIT = 0x400000000000400 - CAP_PEELOFF = 0x200001000000000 - CAP_POLL_EVENT = 0x400000000000020 - CAP_PREAD = 0x20000000000000d - CAP_PWRITE = 0x20000000000000e - CAP_READ = 0x200000000000001 - CAP_RECV = 0x200000000000001 - CAP_RENAMEAT_SOURCE = 0x200000004000400 - CAP_RENAMEAT_TARGET = 0x200040000000400 - CAP_RIGHTS_VERSION = 0x0 - CAP_RIGHTS_VERSION_00 = 0x0 - CAP_SEEK = 0x20000000000000c - CAP_SEEK_TELL = 0x200000000000004 - CAP_SEM_GETVALUE = 0x400000000000004 - CAP_SEM_POST = 0x400000000000008 - CAP_SEM_WAIT = 0x400000000000010 - CAP_SEND = 0x200000000000002 - CAP_SETSOCKOPT = 0x200002000000000 - CAP_SHUTDOWN = 0x200004000000000 - CAP_SOCK_CLIENT = 0x200007780000003 - CAP_SOCK_SERVER = 0x200007f60000003 - CAP_SYMLINKAT = 0x200000008000400 - CAP_TTYHOOK = 0x400000000000100 - CAP_UNLINKAT = 0x200000010000400 - CAP_UNUSED0_44 = 0x200080000000000 - CAP_UNUSED0_57 = 0x300000000000000 - CAP_UNUSED1_22 = 0x400000000200000 - CAP_UNUSED1_57 = 0x500000000000000 - CAP_WRITE = 0x200000000000002 - CFLUSH = 0xf - CLOCAL = 0x8000 - CLOCK_MONOTONIC = 0x4 - CLOCK_MONOTONIC_FAST = 0xc - CLOCK_MONOTONIC_PRECISE = 0xb - CLOCK_PROCESS_CPUTIME_ID = 0xf - CLOCK_PROF = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_FAST = 0xa - CLOCK_REALTIME_PRECISE = 0x9 - CLOCK_SECOND = 0xd - CLOCK_THREAD_CPUTIME_ID = 0xe - CLOCK_UPTIME = 0x5 - CLOCK_UPTIME_FAST = 0x8 - CLOCK_UPTIME_PRECISE = 0x7 - CLOCK_VIRTUAL = 0x1 - CREAD = 0x800 - CRTSCTS = 0x30000 - CS5 = 0x0 - CS6 = 0x100 - CS7 = 0x200 - CS8 = 0x300 - CSIZE = 0x300 - CSTART = 0x11 - CSTATUS = 0x14 - CSTOP = 0x13 - CSTOPB = 0x400 - CSUSP = 0x1a - CTL_MAXNAME = 0x18 - CTL_NET = 0x4 - DLT_A429 = 0xb8 - DLT_A653_ICM = 0xb9 - DLT_AIRONET_HEADER = 0x78 - DLT_AOS = 0xde - DLT_APPLE_IP_OVER_IEEE1394 = 0x8a - DLT_ARCNET = 0x7 - DLT_ARCNET_LINUX = 0x81 - DLT_ATM_CLIP = 0x13 - DLT_ATM_RFC1483 = 0xb - DLT_AURORA = 0x7e - DLT_AX25 = 0x3 - DLT_AX25_KISS = 0xca - DLT_BACNET_MS_TP = 0xa5 - DLT_BLUETOOTH_BREDR_BB = 0xff - DLT_BLUETOOTH_HCI_H4 = 0xbb - DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 - DLT_BLUETOOTH_LE_LL = 0xfb - DLT_BLUETOOTH_LE_LL_WITH_PHDR = 0x100 - DLT_BLUETOOTH_LINUX_MONITOR = 0xfe - DLT_CAN20B = 0xbe - DLT_CAN_SOCKETCAN = 0xe3 - DLT_CHAOS = 0x5 - DLT_CHDLC = 0x68 - DLT_CISCO_IOS = 0x76 - DLT_C_HDLC = 0x68 - DLT_C_HDLC_WITH_DIR = 0xcd - DLT_DBUS = 0xe7 - DLT_DECT = 0xdd - DLT_DOCSIS = 0x8f - DLT_DVB_CI = 0xeb - DLT_ECONET = 0x73 - DLT_EN10MB = 0x1 - DLT_EN3MB = 0x2 - DLT_ENC = 0x6d - DLT_EPON = 0x103 - DLT_ERF = 0xc5 - DLT_ERF_ETH = 0xaf - DLT_ERF_POS = 0xb0 - DLT_FC_2 = 0xe0 - DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 - DLT_FDDI = 0xa - DLT_FLEXRAY = 0xd2 - DLT_FRELAY = 0x6b - DLT_FRELAY_WITH_DIR = 0xce - DLT_GCOM_SERIAL = 0xad - DLT_GCOM_T1E1 = 0xac - DLT_GPF_F = 0xab - DLT_GPF_T = 0xaa - DLT_GPRS_LLC = 0xa9 - DLT_GSMTAP_ABIS = 0xda - DLT_GSMTAP_UM = 0xd9 
- DLT_HHDLC = 0x79 - DLT_IBM_SN = 0x92 - DLT_IBM_SP = 0x91 - DLT_IEEE802 = 0x6 - DLT_IEEE802_11 = 0x69 - DLT_IEEE802_11_RADIO = 0x7f - DLT_IEEE802_11_RADIO_AVS = 0xa3 - DLT_IEEE802_15_4 = 0xc3 - DLT_IEEE802_15_4_LINUX = 0xbf - DLT_IEEE802_15_4_NOFCS = 0xe6 - DLT_IEEE802_15_4_NONASK_PHY = 0xd7 - DLT_IEEE802_16_MAC_CPS = 0xbc - DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 - DLT_INFINIBAND = 0xf7 - DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 - DLT_IPMB_LINUX = 0xd1 - DLT_IPMI_HPM_2 = 0x104 - DLT_IPNET = 0xe2 - DLT_IPOIB = 0xf2 - DLT_IPV4 = 0xe4 - DLT_IPV6 = 0xe5 - DLT_IP_OVER_FC = 0x7a - DLT_JUNIPER_ATM1 = 0x89 - DLT_JUNIPER_ATM2 = 0x87 - DLT_JUNIPER_ATM_CEMIC = 0xee - DLT_JUNIPER_CHDLC = 0xb5 - DLT_JUNIPER_ES = 0x84 - DLT_JUNIPER_ETHER = 0xb2 - DLT_JUNIPER_FIBRECHANNEL = 0xea - DLT_JUNIPER_FRELAY = 0xb4 - DLT_JUNIPER_GGSN = 0x85 - DLT_JUNIPER_ISM = 0xc2 - DLT_JUNIPER_MFR = 0x86 - DLT_JUNIPER_MLFR = 0x83 - DLT_JUNIPER_MLPPP = 0x82 - DLT_JUNIPER_MONITOR = 0xa4 - DLT_JUNIPER_PIC_PEER = 0xae - DLT_JUNIPER_PPP = 0xb3 - DLT_JUNIPER_PPPOE = 0xa7 - DLT_JUNIPER_PPPOE_ATM = 0xa8 - DLT_JUNIPER_SERVICES = 0x88 - DLT_JUNIPER_SRX_E2E = 0xe9 - DLT_JUNIPER_ST = 0xc8 - DLT_JUNIPER_VP = 0xb7 - DLT_JUNIPER_VS = 0xe8 - DLT_LAPB_WITH_DIR = 0xcf - DLT_LAPD = 0xcb - DLT_LIN = 0xd4 - DLT_LINUX_EVDEV = 0xd8 - DLT_LINUX_IRDA = 0x90 - DLT_LINUX_LAPD = 0xb1 - DLT_LINUX_PPP_WITHDIRECTION = 0xa6 - DLT_LINUX_SLL = 0x71 - DLT_LOOP = 0x6c - DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x104 - DLT_MATCHING_MIN = 0x68 - DLT_MFR = 0xb6 - DLT_MOST = 0xd3 - DLT_MPEG_2_TS = 0xf3 - DLT_MPLS = 0xdb - DLT_MTP2 = 0x8c - DLT_MTP2_WITH_PHDR = 0x8b - DLT_MTP3 = 0x8d - DLT_MUX27010 = 0xec - DLT_NETANALYZER = 0xf0 - DLT_NETANALYZER_TRANSPARENT = 0xf1 - DLT_NETLINK = 0xfd - DLT_NFC_LLCP = 0xf5 - DLT_NFLOG = 0xef - DLT_NG40 = 0xf4 - DLT_NULL = 0x0 - DLT_PCI_EXP = 0x7d - DLT_PFLOG = 0x75 - DLT_PFSYNC = 0x79 - DLT_PKTAP = 0x102 - DLT_PPI = 0xc0 - DLT_PPP = 0x9 - DLT_PPP_BSDOS = 0x10 - DLT_PPP_ETHER = 0x33 - DLT_PPP_PPPD = 0xa6 - DLT_PPP_SERIAL = 0x32 - DLT_PPP_WITH_DIR = 0xcc - DLT_PPP_WITH_DIRECTION = 0xa6 - DLT_PRISM_HEADER = 0x77 - DLT_PROFIBUS_DL = 0x101 - DLT_PRONET = 0x4 - DLT_RAIF1 = 0xc6 - DLT_RAW = 0xc - DLT_RIO = 0x7c - DLT_RTAC_SERIAL = 0xfa - DLT_SCCP = 0x8e - DLT_SCTP = 0xf8 - DLT_SITA = 0xc4 - DLT_SLIP = 0x8 - DLT_SLIP_BSDOS = 0xf - DLT_STANAG_5066_D_PDU = 0xed - DLT_SUNATM = 0x7b - DLT_SYMANTEC_FIREWALL = 0x63 - DLT_TZSP = 0x80 - DLT_USB = 0xba - DLT_USBPCAP = 0xf9 - DLT_USB_LINUX = 0xbd - DLT_USB_LINUX_MMAPPED = 0xdc - DLT_USER0 = 0x93 - DLT_USER1 = 0x94 - DLT_USER10 = 0x9d - DLT_USER11 = 0x9e - DLT_USER12 = 0x9f - DLT_USER13 = 0xa0 - DLT_USER14 = 0xa1 - DLT_USER15 = 0xa2 - DLT_USER2 = 0x95 - DLT_USER3 = 0x96 - DLT_USER4 = 0x97 - DLT_USER5 = 0x98 - DLT_USER6 = 0x99 - DLT_USER7 = 0x9a - DLT_USER8 = 0x9b - DLT_USER9 = 0x9c - DLT_WIHART = 0xdf - DLT_WIRESHARK_UPPER_PDU = 0xfc - DLT_X2E_SERIAL = 0xd5 - DLT_X2E_XORAYA = 0xd6 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x40 - ECHOE = 0x2 - ECHOK = 0x4 - ECHOKE = 0x1 - ECHONL = 0x10 - ECHOPRT = 0x20 - EVFILT_AIO = -0x3 - EVFILT_FS = -0x9 - EVFILT_LIO = -0xa - EVFILT_PROC = -0x5 - EVFILT_PROCDESC = -0x8 - EVFILT_READ = -0x1 - EVFILT_SENDFILE = -0xc - EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xc - EVFILT_TIMER = -0x7 - EVFILT_USER = -0xb - EVFILT_VNODE = -0x4 - EVFILT_WRITE = -0x2 - EV_ADD = 0x1 - EV_CLEAR = 0x20 - EV_DELETE = 0x2 - EV_DISABLE = 0x8 - EV_DISPATCH = 0x80 - EV_DROP = 
0x1000 - EV_ENABLE = 0x4 - EV_EOF = 0x8000 - EV_ERROR = 0x4000 - EV_FLAG1 = 0x2000 - EV_FLAG2 = 0x4000 - EV_FORCEONESHOT = 0x100 - EV_ONESHOT = 0x10 - EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 - EXTA = 0x4b00 - EXTATTR_NAMESPACE_EMPTY = 0x0 - EXTATTR_NAMESPACE_SYSTEM = 0x2 - EXTATTR_NAMESPACE_USER = 0x1 - EXTB = 0x9600 - EXTPROC = 0x800 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FLUSHO = 0x800000 - F_CANCEL = 0x5 - F_DUP2FD = 0xa - F_DUP2FD_CLOEXEC = 0x12 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x11 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLK = 0xb - F_GETOWN = 0x5 - F_OGETLK = 0x7 - F_OK = 0x0 - F_OSETLK = 0x8 - F_OSETLKW = 0x9 - F_RDAHEAD = 0x10 - F_RDLCK = 0x1 - F_READAHEAD = 0xf - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLK = 0xc - F_SETLKW = 0xd - F_SETLK_REMOTE = 0xe - F_SETOWN = 0x6 - F_UNLCK = 0x2 - F_UNLCKSYS = 0x4 - F_WRLCK = 0x3 - HUPCL = 0x4000 - ICANON = 0x100 - ICMP6_FILTER = 0x12 - ICRNL = 0x100 - IEXTEN = 0x400 - IFAN_ARRIVAL = 0x0 - IFAN_DEPARTURE = 0x1 - IFF_ALLMULTI = 0x200 - IFF_ALTPHYS = 0x4000 - IFF_BROADCAST = 0x2 - IFF_CANTCHANGE = 0x218f52 - IFF_CANTCONFIG = 0x10000 - IFF_DEBUG = 0x4 - IFF_DRV_OACTIVE = 0x400 - IFF_DRV_RUNNING = 0x40 - IFF_DYING = 0x200000 - IFF_LINK0 = 0x1000 - IFF_LINK1 = 0x2000 - IFF_LINK2 = 0x4000 - IFF_LOOPBACK = 0x8 - IFF_MONITOR = 0x40000 - IFF_MULTICAST = 0x8000 - IFF_NOARP = 0x80 - IFF_OACTIVE = 0x400 - IFF_POINTOPOINT = 0x10 - IFF_PPROMISC = 0x20000 - IFF_PROMISC = 0x100 - IFF_RENAMING = 0x400000 - IFF_RUNNING = 0x40 - IFF_SIMPLEX = 0x800 - IFF_STATICARP = 0x80000 - IFF_UP = 0x1 - IFNAMSIZ = 0x10 - IFT_BRIDGE = 0xd1 - IFT_CARP = 0xf8 - IFT_IEEE1394 = 0x90 - IFT_INFINIBAND = 0xc7 - IFT_L2VLAN = 0x87 - IFT_L3IPVLAN = 0x88 - IFT_PPP = 0x17 - IFT_PROPVIRTUAL = 0x35 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLASSD_HOST = 0xfffffff - IN_CLASSD_NET = 0xf0000000 - IN_CLASSD_NSHIFT = 0x1c - IN_LOOPBACKNET = 0x7f - IN_RFC3021_MASK = 0xfffffffe - IPPROTO_3PC = 0x22 - IPPROTO_ADFS = 0x44 - IPPROTO_AH = 0x33 - IPPROTO_AHIP = 0x3d - IPPROTO_APES = 0x63 - IPPROTO_ARGUS = 0xd - IPPROTO_AX25 = 0x5d - IPPROTO_BHA = 0x31 - IPPROTO_BLT = 0x1e - IPPROTO_BRSATMON = 0x4c - IPPROTO_CARP = 0x70 - IPPROTO_CFTP = 0x3e - IPPROTO_CHAOS = 0x10 - IPPROTO_CMTP = 0x26 - IPPROTO_CPHB = 0x49 - IPPROTO_CPNX = 0x48 - IPPROTO_DDP = 0x25 - IPPROTO_DGP = 0x56 - IPPROTO_DIVERT = 0x102 - IPPROTO_DONE = 0x101 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_EMCON = 0xe - IPPROTO_ENCAP = 0x62 - IPPROTO_EON = 0x50 - IPPROTO_ESP = 0x32 - IPPROTO_ETHERIP = 0x61 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GGP = 0x3 - IPPROTO_GMTP = 0x64 - IPPROTO_GRE = 0x2f - IPPROTO_HELLO = 0x3f - IPPROTO_HIP = 0x8b - IPPROTO_HMP = 0x14 - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IDPR = 0x23 - IPPROTO_IDRP = 0x2d - IPPROTO_IGMP = 0x2 - IPPROTO_IGP = 0x55 - IPPROTO_IGRP = 0x58 - IPPROTO_IL = 0x28 - IPPROTO_INLSP = 0x34 - IPPROTO_INP = 0x20 - IPPROTO_IP = 0x0 - IPPROTO_IPCOMP = 0x6c - IPPROTO_IPCV = 0x47 - IPPROTO_IPEIP = 0x5e - IPPROTO_IPIP = 0x4 - IPPROTO_IPPC = 0x43 - IPPROTO_IPV4 = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_IRTP = 0x1c - IPPROTO_KRYPTOLAN = 0x41 - IPPROTO_LARP = 0x5b - IPPROTO_LEAF1 = 0x19 - IPPROTO_LEAF2 = 
0x1a - IPPROTO_MAX = 0x100 - IPPROTO_MEAS = 0x13 - IPPROTO_MH = 0x87 - IPPROTO_MHRP = 0x30 - IPPROTO_MICP = 0x5f - IPPROTO_MOBILE = 0x37 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_MUX = 0x12 - IPPROTO_ND = 0x4d - IPPROTO_NHRP = 0x36 - IPPROTO_NONE = 0x3b - IPPROTO_NSP = 0x1f - IPPROTO_NVPII = 0xb - IPPROTO_OLD_DIVERT = 0xfe - IPPROTO_OSPFIGP = 0x59 - IPPROTO_PFSYNC = 0xf0 - IPPROTO_PGM = 0x71 - IPPROTO_PIGP = 0x9 - IPPROTO_PIM = 0x67 - IPPROTO_PRM = 0x15 - IPPROTO_PUP = 0xc - IPPROTO_PVP = 0x4b - IPPROTO_RAW = 0xff - IPPROTO_RCCMON = 0xa - IPPROTO_RDP = 0x1b - IPPROTO_RESERVED_253 = 0xfd - IPPROTO_RESERVED_254 = 0xfe - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_RVD = 0x42 - IPPROTO_SATEXPAK = 0x40 - IPPROTO_SATMON = 0x45 - IPPROTO_SCCSP = 0x60 - IPPROTO_SCTP = 0x84 - IPPROTO_SDRP = 0x2a - IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 - IPPROTO_SHIM6 = 0x8c - IPPROTO_SKIP = 0x39 - IPPROTO_SPACER = 0x7fff - IPPROTO_SRPC = 0x5a - IPPROTO_ST = 0x7 - IPPROTO_SVMTP = 0x52 - IPPROTO_SWIPE = 0x35 - IPPROTO_TCF = 0x57 - IPPROTO_TCP = 0x6 - IPPROTO_TLSP = 0x38 - IPPROTO_TP = 0x1d - IPPROTO_TPXX = 0x27 - IPPROTO_TRUNK1 = 0x17 - IPPROTO_TRUNK2 = 0x18 - IPPROTO_TTP = 0x54 - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPPROTO_VINES = 0x53 - IPPROTO_VISA = 0x46 - IPPROTO_VMTP = 0x51 - IPPROTO_WBEXPAK = 0x4f - IPPROTO_WBMON = 0x4e - IPPROTO_WSN = 0x4a - IPPROTO_XNET = 0xf - IPPROTO_XTP = 0x24 - IPV6_AUTOFLOWLABEL = 0x3b - IPV6_BINDANY = 0x40 - IPV6_BINDMULTI = 0x41 - IPV6_BINDV6ONLY = 0x1b - IPV6_CHECKSUM = 0x1a - IPV6_DEFAULT_MULTICAST_HOPS = 0x1 - IPV6_DEFAULT_MULTICAST_LOOP = 0x1 - IPV6_DEFHLIM = 0x40 - IPV6_DONTFRAG = 0x3e - IPV6_DSTOPTS = 0x32 - IPV6_FLOWID = 0x43 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 - IPV6_FLOWTYPE = 0x44 - IPV6_FRAGTTL = 0x78 - IPV6_FW_ADD = 0x1e - IPV6_FW_DEL = 0x1f - IPV6_FW_FLUSH = 0x20 - IPV6_FW_GET = 0x22 - IPV6_FW_ZERO = 0x21 - IPV6_HLIMDEC = 0x1 - IPV6_HOPLIMIT = 0x2f - IPV6_HOPOPTS = 0x31 - IPV6_IPSEC_POLICY = 0x1c - IPV6_JOIN_GROUP = 0xc - IPV6_LEAVE_GROUP = 0xd - IPV6_MAXHLIM = 0xff - IPV6_MAXOPTHDR = 0x800 - IPV6_MAXPACKET = 0xffff - IPV6_MAX_GROUP_SRC_FILTER = 0x200 - IPV6_MAX_MEMBERSHIPS = 0xfff - IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f - IPV6_MMTU = 0x500 - IPV6_MSFILTER = 0x4a - IPV6_MULTICAST_HOPS = 0xa - IPV6_MULTICAST_IF = 0x9 - IPV6_MULTICAST_LOOP = 0xb - IPV6_NEXTHOP = 0x30 - IPV6_PATHMTU = 0x2c - IPV6_PKTINFO = 0x2e - IPV6_PORTRANGE = 0xe - IPV6_PORTRANGE_DEFAULT = 0x0 - IPV6_PORTRANGE_HIGH = 0x1 - IPV6_PORTRANGE_LOW = 0x2 - IPV6_PREFER_TEMPADDR = 0x3f - IPV6_RECVDSTOPTS = 0x28 - IPV6_RECVFLOWID = 0x46 - IPV6_RECVHOPLIMIT = 0x25 - IPV6_RECVHOPOPTS = 0x27 - IPV6_RECVPATHMTU = 0x2b - IPV6_RECVPKTINFO = 0x24 - IPV6_RECVRSSBUCKETID = 0x47 - IPV6_RECVRTHDR = 0x26 - IPV6_RECVTCLASS = 0x39 - IPV6_RSSBUCKETID = 0x45 - IPV6_RSS_LISTEN_BUCKET = 0x42 - IPV6_RTHDR = 0x33 - IPV6_RTHDRDSTOPTS = 0x23 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_SOCKOPT_RESERVED1 = 0x3 - IPV6_TCLASS = 0x3d - IPV6_UNICAST_HOPS = 0x4 - IPV6_USE_MIN_MTU = 0x2a - IPV6_V6ONLY = 0x1b - IPV6_VERSION = 0x60 - IPV6_VERSION_MASK = 0xf0 - IP_ADD_MEMBERSHIP = 0xc - IP_ADD_SOURCE_MEMBERSHIP = 0x46 - IP_BINDANY = 0x18 - IP_BINDMULTI = 0x19 - IP_BLOCK_SOURCE = 0x48 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DONTFRAG = 0x43 - IP_DROP_MEMBERSHIP = 0xd - IP_DROP_SOURCE_MEMBERSHIP = 0x47 - IP_DUMMYNET3 = 0x31 - IP_DUMMYNET_CONFIGURE = 0x3c - 
IP_DUMMYNET_DEL = 0x3d - IP_DUMMYNET_FLUSH = 0x3e - IP_DUMMYNET_GET = 0x40 - IP_FLOWID = 0x5a - IP_FLOWTYPE = 0x5b - IP_FW3 = 0x30 - IP_FW_ADD = 0x32 - IP_FW_DEL = 0x33 - IP_FW_FLUSH = 0x34 - IP_FW_GET = 0x36 - IP_FW_NAT_CFG = 0x38 - IP_FW_NAT_DEL = 0x39 - IP_FW_NAT_GET_CONFIG = 0x3a - IP_FW_NAT_GET_LOG = 0x3b - IP_FW_RESETLOG = 0x37 - IP_FW_TABLE_ADD = 0x28 - IP_FW_TABLE_DEL = 0x29 - IP_FW_TABLE_FLUSH = 0x2a - IP_FW_TABLE_GETSIZE = 0x2b - IP_FW_TABLE_LIST = 0x2c - IP_FW_ZERO = 0x35 - IP_HDRINCL = 0x2 - IP_IPSEC_POLICY = 0x15 - IP_MAXPACKET = 0xffff - IP_MAX_GROUP_SRC_FILTER = 0x200 - IP_MAX_MEMBERSHIPS = 0xfff - IP_MAX_SOCK_MUTE_FILTER = 0x80 - IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MAX_SOURCE_FILTER = 0x400 - IP_MF = 0x2000 - IP_MINTTL = 0x42 - IP_MIN_MEMBERSHIPS = 0x1f - IP_MSFILTER = 0x4a - IP_MSS = 0x240 - IP_MULTICAST_IF = 0x9 - IP_MULTICAST_LOOP = 0xb - IP_MULTICAST_TTL = 0xa - IP_MULTICAST_VIF = 0xe - IP_OFFMASK = 0x1fff - IP_ONESBCAST = 0x17 - IP_OPTIONS = 0x1 - IP_PORTRANGE = 0x13 - IP_PORTRANGE_DEFAULT = 0x0 - IP_PORTRANGE_HIGH = 0x1 - IP_PORTRANGE_LOW = 0x2 - IP_RECVDSTADDR = 0x7 - IP_RECVFLOWID = 0x5d - IP_RECVIF = 0x14 - IP_RECVOPTS = 0x5 - IP_RECVRETOPTS = 0x6 - IP_RECVRSSBUCKETID = 0x5e - IP_RECVTOS = 0x44 - IP_RECVTTL = 0x41 - IP_RETOPTS = 0x8 - IP_RF = 0x8000 - IP_RSSBUCKETID = 0x5c - IP_RSS_LISTEN_BUCKET = 0x1a - IP_RSVP_OFF = 0x10 - IP_RSVP_ON = 0xf - IP_RSVP_VIF_OFF = 0x12 - IP_RSVP_VIF_ON = 0x11 - IP_SENDSRCADDR = 0x7 - IP_TOS = 0x3 - IP_TTL = 0x4 - IP_UNBLOCK_SOURCE = 0x49 - ISIG = 0x80 - ISTRIP = 0x20 - IXANY = 0x800 - IXOFF = 0x400 - IXON = 0x200 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - MADV_AUTOSYNC = 0x7 - MADV_CORE = 0x9 - MADV_DONTNEED = 0x4 - MADV_FREE = 0x5 - MADV_NOCORE = 0x8 - MADV_NORMAL = 0x0 - MADV_NOSYNC = 0x6 - MADV_PROTECT = 0xa - MADV_RANDOM = 0x1 - MADV_SEQUENTIAL = 0x2 - MADV_WILLNEED = 0x3 - MAP_32BIT = 0x80000 - MAP_ALIGNED_SUPER = 0x1000000 - MAP_ALIGNMENT_MASK = -0x1000000 - MAP_ALIGNMENT_SHIFT = 0x18 - MAP_ANON = 0x1000 - MAP_ANONYMOUS = 0x1000 - MAP_COPY = 0x2 - MAP_EXCL = 0x4000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_HASSEMAPHORE = 0x200 - MAP_NOCORE = 0x20000 - MAP_NOSYNC = 0x800 - MAP_PREFAULT_READ = 0x40000 - MAP_PRIVATE = 0x2 - MAP_RESERVED0020 = 0x20 - MAP_RESERVED0040 = 0x40 - MAP_RESERVED0080 = 0x80 - MAP_RESERVED0100 = 0x100 - MAP_SHARED = 0x1 - MAP_STACK = 0x400 - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MSG_CMSG_CLOEXEC = 0x40000 - MSG_COMPAT = 0x8000 - MSG_CTRUNC = 0x20 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x80 - MSG_EOF = 0x100 - MSG_EOR = 0x8 - MSG_NBIO = 0x4000 - MSG_NOSIGNAL = 0x20000 - MSG_NOTIFICATION = 0x2000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_TRUNC = 0x10 - MSG_WAITALL = 0x40 - MSG_WAITFORONE = 0x80000 - MS_ASYNC = 0x1 - MS_INVALIDATE = 0x2 - MS_SYNC = 0x0 - NAME_MAX = 0xff - NET_RT_DUMP = 0x1 - NET_RT_FLAGS = 0x2 - NET_RT_IFLIST = 0x3 - NET_RT_IFLISTL = 0x5 - NET_RT_IFMALIST = 0x4 - NOFLSH = 0x80000000 - NOKERNINFO = 0x2000000 - NOTE_ATTRIB = 0x8 - NOTE_CHILD = 0x4 - NOTE_CLOSE = 0x100 - NOTE_CLOSE_WRITE = 0x200 - NOTE_DELETE = 0x1 - NOTE_EXEC = 0x20000000 - NOTE_EXIT = 0x80000000 - NOTE_EXTEND = 0x4 - NOTE_FFAND = 0x40000000 - NOTE_FFCOPY = 0xc0000000 - NOTE_FFCTRLMASK = 0xc0000000 - NOTE_FFLAGSMASK = 0xffffff - NOTE_FFNOP = 0x0 - NOTE_FFOR = 0x80000000 - NOTE_FILE_POLL = 0x2 - NOTE_FORK = 0x40000000 - NOTE_LINK = 0x10 - NOTE_LOWAT = 0x1 - NOTE_MSECONDS = 0x2 - NOTE_NSECONDS = 0x8 - NOTE_OPEN = 0x80 - NOTE_PCTRLMASK = 0xf0000000 - NOTE_PDATAMASK = 0xfffff - NOTE_READ = 0x400 - NOTE_RENAME = 
0x20 - NOTE_REVOKE = 0x40 - NOTE_SECONDS = 0x1 - NOTE_TRACK = 0x1 - NOTE_TRACKERR = 0x2 - NOTE_TRIGGER = 0x1000000 - NOTE_USECONDS = 0x4 - NOTE_WRITE = 0x2 - OCRNL = 0x10 - ONLCR = 0x2 - ONLRET = 0x40 - ONOCR = 0x20 - ONOEOT = 0x8 - OPOST = 0x1 - OXTABS = 0x4 - O_ACCMODE = 0x3 - O_APPEND = 0x8 - O_ASYNC = 0x40 - O_CLOEXEC = 0x100000 - O_CREAT = 0x200 - O_DIRECT = 0x10000 - O_DIRECTORY = 0x20000 - O_EXCL = 0x800 - O_EXEC = 0x40000 - O_EXLOCK = 0x20 - O_FSYNC = 0x80 - O_NDELAY = 0x4 - O_NOCTTY = 0x8000 - O_NOFOLLOW = 0x100 - O_NONBLOCK = 0x4 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_SHLOCK = 0x10 - O_SYNC = 0x80 - O_TRUNC = 0x400 - O_TTY_INIT = 0x80000 - O_VERIFY = 0x200000 - O_WRONLY = 0x1 - PARENB = 0x1000 - PARMRK = 0x8 - PARODD = 0x2000 - PENDIN = 0x20000000 - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROT_EXEC = 0x4 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - RLIMIT_AS = 0xa - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_MEMLOCK = 0x6 - RLIMIT_NOFILE = 0x8 - RLIMIT_NPROC = 0x7 - RLIMIT_RSS = 0x5 - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0x7fffffffffffffff - RTAX_AUTHOR = 0x6 - RTAX_BRD = 0x7 - RTAX_DST = 0x0 - RTAX_GATEWAY = 0x1 - RTAX_GENMASK = 0x3 - RTAX_IFA = 0x5 - RTAX_IFP = 0x4 - RTAX_MAX = 0x8 - RTAX_NETMASK = 0x2 - RTA_AUTHOR = 0x40 - RTA_BRD = 0x80 - RTA_DST = 0x1 - RTA_GATEWAY = 0x2 - RTA_GENMASK = 0x8 - RTA_IFA = 0x20 - RTA_IFP = 0x10 - RTA_NETMASK = 0x4 - RTF_BLACKHOLE = 0x1000 - RTF_BROADCAST = 0x400000 - RTF_DONE = 0x40 - RTF_DYNAMIC = 0x10 - RTF_FIXEDMTU = 0x80000 - RTF_FMASK = 0x1004d808 - RTF_GATEWAY = 0x2 - RTF_GWFLAG_COMPAT = 0x80000000 - RTF_HOST = 0x4 - RTF_LLDATA = 0x400 - RTF_LLINFO = 0x400 - RTF_LOCAL = 0x200000 - RTF_MODIFIED = 0x20 - RTF_MULTICAST = 0x800000 - RTF_PINNED = 0x100000 - RTF_PROTO1 = 0x8000 - RTF_PROTO2 = 0x4000 - RTF_PROTO3 = 0x40000 - RTF_REJECT = 0x8 - RTF_RNH_LOCKED = 0x40000000 - RTF_STATIC = 0x800 - RTF_STICKY = 0x10000000 - RTF_UP = 0x1 - RTF_XRESOLVE = 0x200 - RTM_ADD = 0x1 - RTM_CHANGE = 0x3 - RTM_DELADDR = 0xd - RTM_DELETE = 0x2 - RTM_DELMADDR = 0x10 - RTM_GET = 0x4 - RTM_IEEE80211 = 0x12 - RTM_IFANNOUNCE = 0x11 - RTM_IFINFO = 0xe - RTM_LOCK = 0x8 - RTM_LOSING = 0x5 - RTM_MISS = 0x7 - RTM_NEWADDR = 0xc - RTM_NEWMADDR = 0xf - RTM_REDIRECT = 0x6 - RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 - RTM_VERSION = 0x5 - RTV_EXPIRE = 0x4 - RTV_HOPCOUNT = 0x2 - RTV_MTU = 0x1 - RTV_RPIPE = 0x8 - RTV_RTT = 0x40 - RTV_RTTVAR = 0x80 - RTV_SPIPE = 0x10 - RTV_SSTHRESH = 0x20 - RTV_WEIGHT = 0x100 - RT_ALL_FIBS = -0x1 - RT_BLACKHOLE = 0x40 - RT_CACHING_CONTEXT = 0x1 - RT_DEFAULT_FIB = 0x0 - RT_HAS_GW = 0x80 - RT_HAS_HEADER = 0x10 - RT_HAS_HEADER_BIT = 0x4 - RT_L2_ME = 0x4 - RT_L2_ME_BIT = 0x2 - RT_LLE_CACHE = 0x100 - RT_MAY_LOOP = 0x8 - RT_MAY_LOOP_BIT = 0x3 - RT_NORTREF = 0x2 - RT_REJECT = 0x20 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_BINTIME = 0x4 - SCM_CREDS = 0x3 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x2 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDMULTI = 0x80206931 - SIOCAIFADDR = 0x8040691a - SIOCAIFGROUP = 0x80286987 - SIOCATMARK = 0x40047307 - SIOCDELMULTI = 0x80206932 - SIOCDIFADDR = 0x80206919 - SIOCDIFGROUP = 0x80286989 - SIOCDIFPHYADDR = 0x80206949 - SIOCGDRVSPEC = 0xc028697b - SIOCGETSGCNT = 0xc0207210 - SIOCGETVIFCNT = 0xc028720f - SIOCGHIWAT = 0x40047301 - SIOCGI2C = 0xc020693d - SIOCGIFADDR = 0xc0206921 - SIOCGIFBRDADDR = 0xc0206923 - SIOCGIFCAP = 0xc020691f - SIOCGIFCONF = 0xc0106924 - SIOCGIFDESCR = 0xc020692a - SIOCGIFDSTADDR = 
0xc0206922 - SIOCGIFFIB = 0xc020695c - SIOCGIFFLAGS = 0xc0206911 - SIOCGIFGENERIC = 0xc020693a - SIOCGIFGMEMB = 0xc028698a - SIOCGIFGROUP = 0xc0286988 - SIOCGIFINDEX = 0xc0206920 - SIOCGIFMAC = 0xc0206926 - SIOCGIFMEDIA = 0xc0306938 - SIOCGIFMETRIC = 0xc0206917 - SIOCGIFMTU = 0xc0206933 - SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206948 - SIOCGIFPHYS = 0xc0206935 - SIOCGIFPSRCADDR = 0xc0206947 - SIOCGIFSTATUS = 0xc331693b - SIOCGIFXMEDIA = 0xc030698b - SIOCGLOWAT = 0x40047303 - SIOCGPGRP = 0x40047309 - SIOCGPRIVATE_0 = 0xc0206950 - SIOCGPRIVATE_1 = 0xc0206951 - SIOCGTUNFIB = 0xc020695e - SIOCIFCREATE = 0xc020697a - SIOCIFCREATE2 = 0xc020697c - SIOCIFDESTROY = 0x80206979 - SIOCIFGCLONERS = 0xc0106978 - SIOCSDRVSPEC = 0x8028697b - SIOCSHIWAT = 0x80047300 - SIOCSIFADDR = 0x8020690c - SIOCSIFBRDADDR = 0x80206913 - SIOCSIFCAP = 0x8020691e - SIOCSIFDESCR = 0x80206929 - SIOCSIFDSTADDR = 0x8020690e - SIOCSIFFIB = 0x8020695d - SIOCSIFFLAGS = 0x80206910 - SIOCSIFGENERIC = 0x80206939 - SIOCSIFLLADDR = 0x8020693c - SIOCSIFMAC = 0x80206927 - SIOCSIFMEDIA = 0xc0206937 - SIOCSIFMETRIC = 0x80206918 - SIOCSIFMTU = 0x80206934 - SIOCSIFNAME = 0x80206928 - SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x80406946 - SIOCSIFPHYS = 0x80206936 - SIOCSIFRVNET = 0xc020695b - SIOCSIFVNET = 0xc020695a - SIOCSLOWAT = 0x80047302 - SIOCSPGRP = 0x80047308 - SIOCSTUNFIB = 0x8020695f - SOCK_CLOEXEC = 0x10000000 - SOCK_DGRAM = 0x2 - SOCK_MAXADDRLEN = 0xff - SOCK_NONBLOCK = 0x20000000 - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_SOCKET = 0xffff - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x2 - SO_ACCEPTFILTER = 0x1000 - SO_BINTIME = 0x2000 - SO_BROADCAST = 0x20 - SO_DEBUG = 0x1 - SO_DONTROUTE = 0x10 - SO_ERROR = 0x1007 - SO_KEEPALIVE = 0x8 - SO_LABEL = 0x1009 - SO_LINGER = 0x80 - SO_LISTENINCQLEN = 0x1013 - SO_LISTENQLEN = 0x1012 - SO_LISTENQLIMIT = 0x1011 - SO_NOSIGPIPE = 0x800 - SO_NO_DDP = 0x8000 - SO_NO_OFFLOAD = 0x4000 - SO_OOBINLINE = 0x100 - SO_PEERLABEL = 0x1010 - SO_PROTOCOL = 0x1016 - SO_PROTOTYPE = 0x1016 - SO_RCVBUF = 0x1002 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_SETFIB = 0x1014 - SO_SNDBUF = 0x1001 - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_TIMESTAMP = 0x400 - SO_TYPE = 0x1008 - SO_USELOOPBACK = 0x40 - SO_USER_COOKIE = 0x1015 - SO_VENDOR = 0x80000000 - TAB0 = 0x0 - TAB3 = 0x4 - TABDLY = 0x4 - TCIFLUSH = 0x1 - TCIOFF = 0x3 - TCIOFLUSH = 0x3 - TCION = 0x4 - TCOFLUSH = 0x2 - TCOOFF = 0x1 - TCOON = 0x2 - TCP_CA_NAME_MAX = 0x10 - TCP_CCALGOOPT = 0x41 - TCP_CONGESTION = 0x40 - TCP_FASTOPEN = 0x401 - TCP_FUNCTION_BLK = 0x2000 - TCP_FUNCTION_NAME_LEN_MAX = 0x20 - TCP_INFO = 0x20 - TCP_KEEPCNT = 0x400 - TCP_KEEPIDLE = 0x100 - TCP_KEEPINIT = 0x80 - TCP_KEEPINTVL = 0x200 - TCP_MAXBURST = 0x4 - TCP_MAXHLEN = 0x3c - TCP_MAXOLEN = 0x28 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_SACK = 0x4 - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0x10 - TCP_MINMSS = 0xd8 - TCP_MSS = 0x218 - TCP_NODELAY = 0x1 - TCP_NOOPT = 0x8 - TCP_NOPUSH = 0x4 - TCP_PCAP_IN = 0x1000 - TCP_PCAP_OUT = 0x800 - TCP_VENDOR = 0x80000000 - TCSAFLUSH = 0x2 - TIOCCBRK = 0x2000747a - TIOCCDTR = 0x20007478 - TIOCCONS = 0x80047462 - TIOCDRAIN = 0x2000745e - TIOCEXCL = 0x2000740d - TIOCEXT = 0x80047460 - TIOCFLUSH = 0x80047410 - TIOCGDRAINWAIT = 0x40047456 - TIOCGETA = 0x402c7413 - TIOCGETD = 0x4004741a - TIOCGPGRP = 0x40047477 - TIOCGPTN = 0x4004740f - TIOCGSID = 0x40047463 - TIOCGWINSZ = 0x40087468 - TIOCMBIC = 0x8004746b - TIOCMBIS = 0x8004746c - 
TIOCMGDTRWAIT = 0x4004745a - TIOCMGET = 0x4004746a - TIOCMSDTRWAIT = 0x8004745b - TIOCMSET = 0x8004746d - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DCD = 0x40 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x20007471 - TIOCNXCL = 0x2000740e - TIOCOUTQ = 0x40047473 - TIOCPKT = 0x80047470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCPTMASTER = 0x2000741c - TIOCSBRK = 0x2000747b - TIOCSCTTY = 0x20007461 - TIOCSDRAINWAIT = 0x80047457 - TIOCSDTR = 0x20007479 - TIOCSETA = 0x802c7414 - TIOCSETAF = 0x802c7416 - TIOCSETAW = 0x802c7415 - TIOCSETD = 0x8004741b - TIOCSIG = 0x2004745f - TIOCSPGRP = 0x80047476 - TIOCSTART = 0x2000746e - TIOCSTAT = 0x20007465 - TIOCSTI = 0x80017472 - TIOCSTOP = 0x2000746f - TIOCSWINSZ = 0x80087467 - TIOCTIMESTAMP = 0x40107459 - TIOCUCNTL = 0x80047466 - TOSTOP = 0x400000 - VDISCARD = 0xf - VDSUSP = 0xb - VEOF = 0x0 - VEOL = 0x1 - VEOL2 = 0x2 - VERASE = 0x3 - VERASE2 = 0x7 - VINTR = 0x8 - VKILL = 0x5 - VLNEXT = 0xe - VMIN = 0x10 - VQUIT = 0x9 - VREPRINT = 0x6 - VSTART = 0xc - VSTATUS = 0x12 - VSTOP = 0xd - VSUSP = 0xa - VTIME = 0x11 - VWERASE = 0x4 - WCONTINUED = 0x4 - WCOREFLAG = 0x80 - WEXITED = 0x10 - WLINUXCLONE = 0x80000000 - WNOHANG = 0x1 - WNOWAIT = 0x8 - WSTOPPED = 0x2 - WTRAPPED = 0x20 - WUNTRACED = 0x2 + AF_APPLETALK = 0x10 + AF_ARP = 0x23 + AF_ATM = 0x1e + AF_BLUETOOTH = 0x24 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1c + AF_INET6_SDP = 0x2a + AF_INET_SDP = 0x28 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x2a + AF_NATM = 0x1d + AF_NETBIOS = 0x6 + AF_NETGRAPH = 0x20 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SCLUSTER = 0x22 + AF_SIP = 0x18 + AF_SLOW = 0x21 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VENDOR00 = 0x27 + AF_VENDOR01 = 0x29 + AF_VENDOR02 = 0x2b + AF_VENDOR03 = 0x2d + AF_VENDOR04 = 0x2f + AF_VENDOR05 = 0x31 + AF_VENDOR06 = 0x33 + AF_VENDOR07 = 0x35 + AF_VENDOR08 = 0x37 + AF_VENDOR09 = 0x39 + AF_VENDOR10 = 0x3b + AF_VENDOR11 = 0x3d + AF_VENDOR12 = 0x3f + AF_VENDOR13 = 0x41 + AF_VENDOR14 = 0x43 + AF_VENDOR15 = 0x45 + AF_VENDOR16 = 0x47 + AF_VENDOR17 = 0x49 + AF_VENDOR18 = 0x4b + AF_VENDOR19 = 0x4d + AF_VENDOR20 = 0x4f + AF_VENDOR21 = 0x51 + AF_VENDOR22 = 0x53 + AF_VENDOR23 = 0x55 + AF_VENDOR24 = 0x57 + AF_VENDOR25 = 0x59 + AF_VENDOR26 = 0x5b + AF_VENDOR27 = 0x5d + AF_VENDOR28 = 0x5f + AF_VENDOR29 = 0x61 + AF_VENDOR30 = 0x63 + AF_VENDOR31 = 0x65 + AF_VENDOR32 = 0x67 + AF_VENDOR33 = 0x69 + AF_VENDOR34 = 0x6b + AF_VENDOR35 = 0x6d + AF_VENDOR36 = 0x6f + AF_VENDOR37 = 0x71 + AF_VENDOR38 = 0x73 + AF_VENDOR39 = 0x75 + AF_VENDOR40 = 0x77 + AF_VENDOR41 = 0x79 + AF_VENDOR42 = 0x7b + AF_VENDOR43 = 0x7d + AF_VENDOR44 = 0x7f + AF_VENDOR45 = 0x81 + AF_VENDOR46 = 0x83 + AF_VENDOR47 = 0x85 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + 
B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427c + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRECTION = 0x40044276 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0104279 + BIOCGETBUFMODE = 0x4004427d + BIOCGETIF = 0x4020426b + BIOCGETZMAX = 0x4008427f + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCGTSTAMP = 0x40044283 + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x2000427a + BIOCPROMISC = 0x20004269 + BIOCROTZBUF = 0x40184280 + BIOCSBLEN = 0xc0044266 + BIOCSDIRECTION = 0x80044277 + BIOCSDLT = 0x80044278 + BIOCSETBUFMODE = 0x8004427e + BIOCSETF = 0x80104267 + BIOCSETFNR = 0x80104282 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x8010427b + BIOCSETZBUF = 0x80184281 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCSTSTAMP = 0x80044284 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x8 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_BUFMODE_BUFFER = 0x1 + BPF_BUFMODE_ZBUF = 0x2 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_T_BINTIME = 0x2 + BPF_T_BINTIME_FAST = 0x102 + BPF_T_BINTIME_MONOTONIC = 0x202 + BPF_T_BINTIME_MONOTONIC_FAST = 0x302 + BPF_T_FAST = 0x100 + BPF_T_FLAG_MASK = 0x300 + BPF_T_FORMAT_MASK = 0x3 + BPF_T_MICROTIME = 0x0 + BPF_T_MICROTIME_FAST = 0x100 + BPF_T_MICROTIME_MONOTONIC = 0x200 + BPF_T_MICROTIME_MONOTONIC_FAST = 0x300 + BPF_T_MONOTONIC = 0x200 + BPF_T_MONOTONIC_FAST = 0x300 + BPF_T_NANOTIME = 0x1 + BPF_T_NANOTIME_FAST = 0x101 + BPF_T_NANOTIME_MONOTONIC = 0x201 + BPF_T_NANOTIME_MONOTONIC_FAST = 0x301 + BPF_T_NONE = 0x3 + BPF_T_NORMAL = 0x0 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_MONOTONIC = 0x4 + CLOCK_MONOTONIC_FAST = 0xc + CLOCK_MONOTONIC_PRECISE = 0xb + CLOCK_PROCESS_CPUTIME_ID = 0xf + CLOCK_PROF = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_FAST = 0xa + CLOCK_REALTIME_PRECISE = 0x9 + CLOCK_SECOND = 0xd + CLOCK_THREAD_CPUTIME_ID = 0xe + CLOCK_UPTIME = 0x5 + CLOCK_UPTIME_FAST = 0x8 + CLOCK_UPTIME_PRECISE = 0x7 + CLOCK_VIRTUAL = 0x1 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_MAXNAME = 0x18 + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT 
= 0xdd + DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0xf6 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x79 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_FS = -0x9 + EVFILT_LIO = -0xa + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + 
EVFILT_SYSCOUNT = 0xb + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xb + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DROP = 0x1000 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTATTR_NAMESPACE_EMPTY = 0x0 + EXTATTR_NAMESPACE_SYSTEM = 0x2 + EXTATTR_NAMESPACE_USER = 0x1 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_CANCEL = 0x5 + F_DUP2FD = 0xa + F_DUP2FD_CLOEXEC = 0x12 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x11 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0xb + F_GETOWN = 0x5 + F_OGETLK = 0x7 + F_OK = 0x0 + F_OSETLK = 0x8 + F_OSETLKW = 0x9 + F_RDAHEAD = 0x10 + F_RDLCK = 0x1 + F_READAHEAD = 0xf + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0xc + F_SETLKW = 0xd + F_SETLK_REMOTE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_UNLCKSYS = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x218f72 + IFF_CANTCONFIG = 0x10000 + IFF_DEBUG = 0x4 + IFF_DRV_OACTIVE = 0x400 + IFF_DRV_RUNNING = 0x40 + IFF_DYING = 0x200000 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MONITOR = 0x40000 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PPROMISC = 0x20000 + IFF_PROMISC = 0x100 + IFF_RENAMING = 0x400000 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_SMART = 0x20 + IFF_STATICARP = 0x80000 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf8 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf2 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + 
IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_IPXIP = 0xf9 + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf6 + IFT_PFSYNC = 0xf7 + IFT_PLC = 0xae + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf1 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_STF = 0xd7 + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VOICEEM = 0x64 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_MASK = 0xfffffffe + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CARP = 0x70 + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 
+ IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HIP = 0x8b + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MH = 0x87 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OLD_DIVERT = 0xfe + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_RESERVED_253 = 0xfd + IPPROTO_RESERVED_254 = 0xfe + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEND = 0x103 + IPPROTO_SEP = 0x21 + IPPROTO_SHIM6 = 0x8c + IPPROTO_SKIP = 0x39 + IPPROTO_SPACER = 0x7fff + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TLSP = 0x38 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDANY = 0x40 + IPV6_BINDV6ONLY = 0x1b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MIN_MEMBERSHIPS = 0x1f + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + 
IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BINDANY = 0x18 + IP_BLOCK_SOURCE = 0x48 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DONTFRAG = 0x43 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET3 = 0x31 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW3 = 0x30 + IP_FW_ADD = 0x32 + IP_FW_DEL = 0x33 + IP_FW_FLUSH = 0x34 + IP_FW_GET = 0x36 + IP_FW_NAT_CFG = 0x38 + IP_FW_NAT_DEL = 0x39 + IP_FW_NAT_GET_CONFIG = 0x3a + IP_FW_NAT_GET_LOG = 0x3b + IP_FW_RESETLOG = 0x37 + IP_FW_TABLE_ADD = 0x28 + IP_FW_TABLE_DEL = 0x29 + IP_FW_TABLE_FLUSH = 0x2a + IP_FW_TABLE_GETSIZE = 0x2b + IP_FW_TABLE_LIST = 0x2c + IP_FW_ZERO = 0x35 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MAX_SOURCE_FILTER = 0x400 + IP_MF = 0x2000 + IP_MINTTL = 0x42 + IP_MIN_MEMBERSHIPS = 0x1f + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_OFFMASK = 0x1fff + IP_ONESBCAST = 0x17 + IP_OPTIONS = 0x1 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVTOS = 0x44 + IP_RECVTTL = 0x41 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_AUTOSYNC = 0x7 + MADV_CORE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_NOCORE = 0x8 + MADV_NORMAL = 0x0 + MADV_NOSYNC = 0x6 + MADV_PROTECT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MAP_32BIT = 0x80000 + MAP_ALIGNED_SUPER = 0x1000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_EXCL = 0x4000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_NOCORE = 0x20000 + MAP_NORESERVE = 0x40 + MAP_NOSYNC = 0x800 + MAP_PREFAULT_READ = 0x40000 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_RESERVED0080 = 0x80 + MAP_RESERVED0100 = 0x100 + MAP_SHARED = 0x1 + MAP_STACK = 0x400 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_CMSG_CLOEXEC = 0x40000 + MSG_COMPAT = 0x8000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_NBIO = 0x4000 + MSG_NOSIGNAL = 0x20000 + MSG_NOTIFICATION = 0x2000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x0 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + 
NET_RT_IFLISTL = 0x5 + NET_RT_IFMALIST = 0x4 + NET_RT_MAXID = 0x6 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_MSECONDS = 0x2 + NOTE_NSECONDS = 0x8 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x4 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x100000 + O_CREAT = 0x200 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x20000 + O_EXCL = 0x800 + O_EXEC = 0x40000 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_TTY_INIT = 0x80000 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x1004d808 + RTF_GATEWAY = 0x2 + RTF_GWFLAG_COMPAT = 0x80000000 + RTF_HOST = 0x4 + RTF_LLDATA = 0x400 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_RNH_LOCKED = 0x40000000 + RTF_STATIC = 0x800 + RTF_STICKY = 0x10000000 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x12 + RTM_IFANNOUNCE = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RTV_WEIGHT = 0x100 + RT_ALL_FIBS = -0x1 + RT_CACHING_CONTEXT = 0x1 + RT_DEFAULT_FIB = 0x0 + RT_NORTREF = 0x2 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_BINTIME = 0x4 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCADDRT = 0x8040720a + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 + SIOCALIFADDR = 0x8118691b + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80206932 + SIOCDELRT = 0x8040720b + SIOCDIFADDR = 0x80206919 + 
SIOCDIFGROUP = 0x80286989 + SIOCDIFPHYADDR = 0x80206949 + SIOCDLIFADDR = 0x8118691d + SIOCGDRVSPEC = 0xc028697b + SIOCGETSGCNT = 0xc0207210 + SIOCGETVIFCNT = 0xc028720f + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020691f + SIOCGIFCONF = 0xc0106924 + SIOCGIFDESCR = 0xc020692a + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFIB = 0xc020695c + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFINDEX = 0xc0206920 + SIOCGIFMAC = 0xc0206926 + SIOCGIFMEDIA = 0xc0306938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFSTATUS = 0xc331693b + SIOCGLIFADDR = 0xc118691c + SIOCGLIFPHYADDR = 0xc118694b + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGPRIVATE_0 = 0xc0206950 + SIOCGPRIVATE_1 = 0xc0206951 + SIOCIFCREATE = 0xc020697a + SIOCIFCREATE2 = 0xc020697c + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSDRVSPEC = 0x8028697b + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020691e + SIOCSIFDESCR = 0x80206929 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFIB = 0x8020695d + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206927 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNAME = 0x80206928 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPHYS = 0x80206936 + SIOCSIFRVNET = 0xc020695b + SIOCSIFVNET = 0xc020695a + SIOCSLIFPHYADDR = 0x8118694a + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_NONBLOCK = 0x20000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BINTIME = 0x2000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1009 + SO_LINGER = 0x80 + SO_LISTENINCQLEN = 0x1013 + SO_LISTENQLEN = 0x1012 + SO_LISTENQLIMIT = 0x1011 + SO_NOSIGPIPE = 0x800 + SO_NO_DDP = 0x8000 + SO_NO_OFFLOAD = 0x4000 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1010 + SO_PROTOCOL = 0x1016 + SO_PROTOTYPE = 0x1016 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SETFIB = 0x1014 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_USER_COOKIE = 0x1015 + SO_VENDOR = 0x80000000 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_CA_NAME_MAX = 0x10 + TCP_CONGESTION = 0x40 + TCP_INFO = 0x20 + TCP_KEEPCNT = 0x400 + TCP_KEEPIDLE = 0x100 + TCP_KEEPINIT = 0x80 + TCP_KEEPINTVL = 0x200 + TCP_MAXBURST = 0x4 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_VENDOR = 0x80000000 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGPTN = 0x4004740f + 
TIOCGSID = 0x40047463 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DCD = 0x40 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMASTER = 0x2000741c + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40107459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VERASE2 = 0x7 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x4 + WCOREFLAG = 0x80 + WEXITED = 0x10 + WLINUXCLONE = 0x80000000 + WNOHANG = 0x1 + WNOWAIT = 0x8 + WSTOPPED = 0x2 + WTRAPPED = 0x20 + WUNTRACED = 0x2 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index c5c6f13e530..2afbe2d5ed7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -1,5 +1,5 @@ // mkerrors.sh -// Code generated by the command above; see README.md. DO NOT EDIT. 
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build arm,freebsd @@ -11,1428 +11,1442 @@ package unix import "syscall" const ( - AF_APPLETALK = 0x10 - AF_ARP = 0x23 - AF_ATM = 0x1e - AF_BLUETOOTH = 0x24 - AF_CCITT = 0xa - AF_CHAOS = 0x5 - AF_CNT = 0x15 - AF_COIP = 0x14 - AF_DATAKIT = 0x9 - AF_DECnet = 0xc - AF_DLI = 0xd - AF_E164 = 0x1a - AF_ECMA = 0x8 - AF_HYLINK = 0xf - AF_IEEE80211 = 0x25 - AF_IMPLINK = 0x3 - AF_INET = 0x2 - AF_INET6 = 0x1c - AF_INET6_SDP = 0x2a - AF_INET_SDP = 0x28 - AF_IPX = 0x17 - AF_ISDN = 0x1a - AF_ISO = 0x7 - AF_LAT = 0xe - AF_LINK = 0x12 - AF_LOCAL = 0x1 - AF_MAX = 0x2a - AF_NATM = 0x1d - AF_NETBIOS = 0x6 - AF_NETGRAPH = 0x20 - AF_OSI = 0x7 - AF_PUP = 0x4 - AF_ROUTE = 0x11 - AF_SCLUSTER = 0x22 - AF_SIP = 0x18 - AF_SLOW = 0x21 - AF_SNA = 0xb - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VENDOR00 = 0x27 - AF_VENDOR01 = 0x29 - AF_VENDOR02 = 0x2b - AF_VENDOR03 = 0x2d - AF_VENDOR04 = 0x2f - AF_VENDOR05 = 0x31 - AF_VENDOR06 = 0x33 - AF_VENDOR07 = 0x35 - AF_VENDOR08 = 0x37 - AF_VENDOR09 = 0x39 - AF_VENDOR10 = 0x3b - AF_VENDOR11 = 0x3d - AF_VENDOR12 = 0x3f - AF_VENDOR13 = 0x41 - AF_VENDOR14 = 0x43 - AF_VENDOR15 = 0x45 - AF_VENDOR16 = 0x47 - AF_VENDOR17 = 0x49 - AF_VENDOR18 = 0x4b - AF_VENDOR19 = 0x4d - AF_VENDOR20 = 0x4f - AF_VENDOR21 = 0x51 - AF_VENDOR22 = 0x53 - AF_VENDOR23 = 0x55 - AF_VENDOR24 = 0x57 - AF_VENDOR25 = 0x59 - AF_VENDOR26 = 0x5b - AF_VENDOR27 = 0x5d - AF_VENDOR28 = 0x5f - AF_VENDOR29 = 0x61 - AF_VENDOR30 = 0x63 - AF_VENDOR31 = 0x65 - AF_VENDOR32 = 0x67 - AF_VENDOR33 = 0x69 - AF_VENDOR34 = 0x6b - AF_VENDOR35 = 0x6d - AF_VENDOR36 = 0x6f - AF_VENDOR37 = 0x71 - AF_VENDOR38 = 0x73 - AF_VENDOR39 = 0x75 - AF_VENDOR40 = 0x77 - AF_VENDOR41 = 0x79 - AF_VENDOR42 = 0x7b - AF_VENDOR43 = 0x7d - AF_VENDOR44 = 0x7f - AF_VENDOR45 = 0x81 - AF_VENDOR46 = 0x83 - AF_VENDOR47 = 0x85 - ALTWERASE = 0x200 - B0 = 0x0 - B110 = 0x6e - B115200 = 0x1c200 - B1200 = 0x4b0 - B134 = 0x86 - B14400 = 0x3840 - B150 = 0x96 - B1800 = 0x708 - B19200 = 0x4b00 - B200 = 0xc8 - B230400 = 0x38400 - B2400 = 0x960 - B28800 = 0x7080 - B300 = 0x12c - B38400 = 0x9600 - B460800 = 0x70800 - B4800 = 0x12c0 - B50 = 0x32 - B57600 = 0xe100 - B600 = 0x258 - B7200 = 0x1c20 - B75 = 0x4b - B76800 = 0x12c00 - B921600 = 0xe1000 - B9600 = 0x2580 - BIOCFEEDBACK = 0x8004427c - BIOCFLUSH = 0x20004268 - BIOCGBLEN = 0x40044266 - BIOCGDIRECTION = 0x40044276 - BIOCGDLT = 0x4004426a - BIOCGDLTLIST = 0xc0084279 - BIOCGETBUFMODE = 0x4004427d - BIOCGETIF = 0x4020426b - BIOCGETZMAX = 0x4004427f - BIOCGHDRCMPLT = 0x40044274 - BIOCGRSIG = 0x40044272 - BIOCGRTIMEOUT = 0x4010426e - BIOCGSEESENT = 0x40044276 - BIOCGSTATS = 0x4008426f - BIOCGTSTAMP = 0x40044283 - BIOCIMMEDIATE = 0x80044270 - BIOCLOCK = 0x2000427a - BIOCPROMISC = 0x20004269 - BIOCROTZBUF = 0x400c4280 - BIOCSBLEN = 0xc0044266 - BIOCSDIRECTION = 0x80044277 - BIOCSDLT = 0x80044278 - BIOCSETBUFMODE = 0x8004427e - BIOCSETF = 0x80084267 - BIOCSETFNR = 0x80084282 - BIOCSETIF = 0x8020426c - BIOCSETWF = 0x8008427b - BIOCSETZBUF = 0x800c4281 - BIOCSHDRCMPLT = 0x80044275 - BIOCSRSIG = 0x80044273 - BIOCSRTIMEOUT = 0x8010426d - BIOCSSEESENT = 0x80044277 - BIOCSTSTAMP = 0x80044284 - BIOCVERSION = 0x40044271 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ALIGNMENT = 0x4 - BPF_ALU = 0x4 - BPF_AND = 0x50 - BPF_B = 0x10 - BPF_BUFMODE_BUFFER = 0x1 - BPF_BUFMODE_ZBUF = 0x2 - BPF_DIV = 0x30 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JMP = 0x5 - BPF_JSET = 0x40 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 
0x1 - BPF_LEN = 0x80 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXBUFSIZE = 0x80000 - BPF_MAXINSNS = 0x200 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINBUFSIZE = 0x20 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_OR = 0x40 - BPF_RELEASE = 0x30bb6 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAX = 0x0 - BPF_TXA = 0x80 - BPF_T_BINTIME = 0x2 - BPF_T_BINTIME_FAST = 0x102 - BPF_T_BINTIME_MONOTONIC = 0x202 - BPF_T_BINTIME_MONOTONIC_FAST = 0x302 - BPF_T_FAST = 0x100 - BPF_T_FLAG_MASK = 0x300 - BPF_T_FORMAT_MASK = 0x3 - BPF_T_MICROTIME = 0x0 - BPF_T_MICROTIME_FAST = 0x100 - BPF_T_MICROTIME_MONOTONIC = 0x200 - BPF_T_MICROTIME_MONOTONIC_FAST = 0x300 - BPF_T_MONOTONIC = 0x200 - BPF_T_MONOTONIC_FAST = 0x300 - BPF_T_NANOTIME = 0x1 - BPF_T_NANOTIME_FAST = 0x101 - BPF_T_NANOTIME_MONOTONIC = 0x201 - BPF_T_NANOTIME_MONOTONIC_FAST = 0x301 - BPF_T_NONE = 0x3 - BPF_T_NORMAL = 0x0 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XOR = 0xa0 - BRKINT = 0x2 - CAP_ACCEPT = 0x200000020000000 - CAP_ACL_CHECK = 0x400000000010000 - CAP_ACL_DELETE = 0x400000000020000 - CAP_ACL_GET = 0x400000000040000 - CAP_ACL_SET = 0x400000000080000 - CAP_ALL0 = 0x20007ffffffffff - CAP_ALL1 = 0x4000000001fffff - CAP_BIND = 0x200000040000000 - CAP_BINDAT = 0x200008000000400 - CAP_CHFLAGSAT = 0x200000000001400 - CAP_CONNECT = 0x200000080000000 - CAP_CONNECTAT = 0x200010000000400 - CAP_CREATE = 0x200000000000040 - CAP_EVENT = 0x400000000000020 - CAP_EXTATTR_DELETE = 0x400000000001000 - CAP_EXTATTR_GET = 0x400000000002000 - CAP_EXTATTR_LIST = 0x400000000004000 - CAP_EXTATTR_SET = 0x400000000008000 - CAP_FCHDIR = 0x200000000000800 - CAP_FCHFLAGS = 0x200000000001000 - CAP_FCHMOD = 0x200000000002000 - CAP_FCHMODAT = 0x200000000002400 - CAP_FCHOWN = 0x200000000004000 - CAP_FCHOWNAT = 0x200000000004400 - CAP_FCNTL = 0x200000000008000 - CAP_FCNTL_ALL = 0x78 - CAP_FCNTL_GETFL = 0x8 - CAP_FCNTL_GETOWN = 0x20 - CAP_FCNTL_SETFL = 0x10 - CAP_FCNTL_SETOWN = 0x40 - CAP_FEXECVE = 0x200000000000080 - CAP_FLOCK = 0x200000000010000 - CAP_FPATHCONF = 0x200000000020000 - CAP_FSCK = 0x200000000040000 - CAP_FSTAT = 0x200000000080000 - CAP_FSTATAT = 0x200000000080400 - CAP_FSTATFS = 0x200000000100000 - CAP_FSYNC = 0x200000000000100 - CAP_FTRUNCATE = 0x200000000000200 - CAP_FUTIMES = 0x200000000200000 - CAP_FUTIMESAT = 0x200000000200400 - CAP_GETPEERNAME = 0x200000100000000 - CAP_GETSOCKNAME = 0x200000200000000 - CAP_GETSOCKOPT = 0x200000400000000 - CAP_IOCTL = 0x400000000000080 - CAP_IOCTLS_ALL = 0x7fffffff - CAP_KQUEUE = 0x400000000100040 - CAP_KQUEUE_CHANGE = 0x400000000100000 - CAP_KQUEUE_EVENT = 0x400000000000040 - CAP_LINKAT_SOURCE = 0x200020000000400 - CAP_LINKAT_TARGET = 0x200000000400400 - CAP_LISTEN = 0x200000800000000 - CAP_LOOKUP = 0x200000000000400 - CAP_MAC_GET = 0x400000000000001 - CAP_MAC_SET = 0x400000000000002 - CAP_MKDIRAT = 0x200000000800400 - CAP_MKFIFOAT = 0x200000001000400 - CAP_MKNODAT = 0x200000002000400 - CAP_MMAP = 0x200000000000010 - CAP_MMAP_R = 0x20000000000001d - CAP_MMAP_RW = 0x20000000000001f - CAP_MMAP_RWX = 0x20000000000003f - CAP_MMAP_RX = 0x20000000000003d - CAP_MMAP_W = 0x20000000000001e - CAP_MMAP_WX = 0x20000000000003e - CAP_MMAP_X = 0x20000000000003c - CAP_PDGETPID = 0x400000000000200 - CAP_PDKILL = 0x400000000000800 - CAP_PDWAIT = 0x400000000000400 - CAP_PEELOFF = 0x200001000000000 - CAP_POLL_EVENT = 0x400000000000020 - CAP_PREAD = 0x20000000000000d - CAP_PWRITE = 0x20000000000000e - CAP_READ = 0x200000000000001 - 
CAP_RECV = 0x200000000000001 - CAP_RENAMEAT_SOURCE = 0x200000004000400 - CAP_RENAMEAT_TARGET = 0x200040000000400 - CAP_RIGHTS_VERSION = 0x0 - CAP_RIGHTS_VERSION_00 = 0x0 - CAP_SEEK = 0x20000000000000c - CAP_SEEK_TELL = 0x200000000000004 - CAP_SEM_GETVALUE = 0x400000000000004 - CAP_SEM_POST = 0x400000000000008 - CAP_SEM_WAIT = 0x400000000000010 - CAP_SEND = 0x200000000000002 - CAP_SETSOCKOPT = 0x200002000000000 - CAP_SHUTDOWN = 0x200004000000000 - CAP_SOCK_CLIENT = 0x200007780000003 - CAP_SOCK_SERVER = 0x200007f60000003 - CAP_SYMLINKAT = 0x200000008000400 - CAP_TTYHOOK = 0x400000000000100 - CAP_UNLINKAT = 0x200000010000400 - CAP_UNUSED0_44 = 0x200080000000000 - CAP_UNUSED0_57 = 0x300000000000000 - CAP_UNUSED1_22 = 0x400000000200000 - CAP_UNUSED1_57 = 0x500000000000000 - CAP_WRITE = 0x200000000000002 - CFLUSH = 0xf - CLOCAL = 0x8000 - CLOCK_MONOTONIC = 0x4 - CLOCK_MONOTONIC_FAST = 0xc - CLOCK_MONOTONIC_PRECISE = 0xb - CLOCK_PROCESS_CPUTIME_ID = 0xf - CLOCK_PROF = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_FAST = 0xa - CLOCK_REALTIME_PRECISE = 0x9 - CLOCK_SECOND = 0xd - CLOCK_THREAD_CPUTIME_ID = 0xe - CLOCK_UPTIME = 0x5 - CLOCK_UPTIME_FAST = 0x8 - CLOCK_UPTIME_PRECISE = 0x7 - CLOCK_VIRTUAL = 0x1 - CREAD = 0x800 - CRTSCTS = 0x30000 - CS5 = 0x0 - CS6 = 0x100 - CS7 = 0x200 - CS8 = 0x300 - CSIZE = 0x300 - CSTART = 0x11 - CSTATUS = 0x14 - CSTOP = 0x13 - CSTOPB = 0x400 - CSUSP = 0x1a - CTL_MAXNAME = 0x18 - CTL_NET = 0x4 - DLT_A429 = 0xb8 - DLT_A653_ICM = 0xb9 - DLT_AIRONET_HEADER = 0x78 - DLT_AOS = 0xde - DLT_APPLE_IP_OVER_IEEE1394 = 0x8a - DLT_ARCNET = 0x7 - DLT_ARCNET_LINUX = 0x81 - DLT_ATM_CLIP = 0x13 - DLT_ATM_RFC1483 = 0xb - DLT_AURORA = 0x7e - DLT_AX25 = 0x3 - DLT_AX25_KISS = 0xca - DLT_BACNET_MS_TP = 0xa5 - DLT_BLUETOOTH_BREDR_BB = 0xff - DLT_BLUETOOTH_HCI_H4 = 0xbb - DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 - DLT_BLUETOOTH_LE_LL = 0xfb - DLT_BLUETOOTH_LE_LL_WITH_PHDR = 0x100 - DLT_BLUETOOTH_LINUX_MONITOR = 0xfe - DLT_CAN20B = 0xbe - DLT_CAN_SOCKETCAN = 0xe3 - DLT_CHAOS = 0x5 - DLT_CHDLC = 0x68 - DLT_CISCO_IOS = 0x76 - DLT_CLASS_NETBSD_RAWAF = 0x2240000 - DLT_C_HDLC = 0x68 - DLT_C_HDLC_WITH_DIR = 0xcd - DLT_DBUS = 0xe7 - DLT_DECT = 0xdd - DLT_DOCSIS = 0x8f - DLT_DVB_CI = 0xeb - DLT_ECONET = 0x73 - DLT_EN10MB = 0x1 - DLT_EN3MB = 0x2 - DLT_ENC = 0x6d - DLT_EPON = 0x103 - DLT_ERF = 0xc5 - DLT_ERF_ETH = 0xaf - DLT_ERF_POS = 0xb0 - DLT_FC_2 = 0xe0 - DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 - DLT_FDDI = 0xa - DLT_FLEXRAY = 0xd2 - DLT_FRELAY = 0x6b - DLT_FRELAY_WITH_DIR = 0xce - DLT_GCOM_SERIAL = 0xad - DLT_GCOM_T1E1 = 0xac - DLT_GPF_F = 0xab - DLT_GPF_T = 0xaa - DLT_GPRS_LLC = 0xa9 - DLT_GSMTAP_ABIS = 0xda - DLT_GSMTAP_UM = 0xd9 - DLT_IBM_SN = 0x92 - DLT_IBM_SP = 0x91 - DLT_IEEE802 = 0x6 - DLT_IEEE802_11 = 0x69 - DLT_IEEE802_11_RADIO = 0x7f - DLT_IEEE802_11_RADIO_AVS = 0xa3 - DLT_IEEE802_15_4 = 0xc3 - DLT_IEEE802_15_4_LINUX = 0xbf - DLT_IEEE802_15_4_NOFCS = 0xe6 - DLT_IEEE802_15_4_NONASK_PHY = 0xd7 - DLT_IEEE802_16_MAC_CPS = 0xbc - DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 - DLT_INFINIBAND = 0xf7 - DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 - DLT_IPMB_LINUX = 0xd1 - DLT_IPMI_HPM_2 = 0x104 - DLT_IPNET = 0xe2 - DLT_IPOIB = 0xf2 - DLT_IPV4 = 0xe4 - DLT_IPV6 = 0xe5 - DLT_IP_OVER_FC = 0x7a - DLT_ISO_14443 = 0x108 - DLT_JUNIPER_ATM1 = 0x89 - DLT_JUNIPER_ATM2 = 0x87 - DLT_JUNIPER_ATM_CEMIC = 0xee - DLT_JUNIPER_CHDLC = 0xb5 - DLT_JUNIPER_ES = 0x84 - DLT_JUNIPER_ETHER = 0xb2 - DLT_JUNIPER_FIBRECHANNEL = 0xea - DLT_JUNIPER_FRELAY = 0xb4 - DLT_JUNIPER_GGSN = 0x85 - DLT_JUNIPER_ISM = 0xc2 - DLT_JUNIPER_MFR = 0x86 - 
DLT_JUNIPER_MLFR = 0x83 - DLT_JUNIPER_MLPPP = 0x82 - DLT_JUNIPER_MONITOR = 0xa4 - DLT_JUNIPER_PIC_PEER = 0xae - DLT_JUNIPER_PPP = 0xb3 - DLT_JUNIPER_PPPOE = 0xa7 - DLT_JUNIPER_PPPOE_ATM = 0xa8 - DLT_JUNIPER_SERVICES = 0x88 - DLT_JUNIPER_SRX_E2E = 0xe9 - DLT_JUNIPER_ST = 0xc8 - DLT_JUNIPER_VP = 0xb7 - DLT_JUNIPER_VS = 0xe8 - DLT_LAPB_WITH_DIR = 0xcf - DLT_LAPD = 0xcb - DLT_LIN = 0xd4 - DLT_LINUX_EVDEV = 0xd8 - DLT_LINUX_IRDA = 0x90 - DLT_LINUX_LAPD = 0xb1 - DLT_LINUX_PPP_WITHDIRECTION = 0xa6 - DLT_LINUX_SLL = 0x71 - DLT_LOOP = 0x6c - DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x109 - DLT_MATCHING_MIN = 0x68 - DLT_MFR = 0xb6 - DLT_MOST = 0xd3 - DLT_MPEG_2_TS = 0xf3 - DLT_MPLS = 0xdb - DLT_MTP2 = 0x8c - DLT_MTP2_WITH_PHDR = 0x8b - DLT_MTP3 = 0x8d - DLT_MUX27010 = 0xec - DLT_NETANALYZER = 0xf0 - DLT_NETANALYZER_TRANSPARENT = 0xf1 - DLT_NETLINK = 0xfd - DLT_NFC_LLCP = 0xf5 - DLT_NFLOG = 0xef - DLT_NG40 = 0xf4 - DLT_NULL = 0x0 - DLT_PCI_EXP = 0x7d - DLT_PFLOG = 0x75 - DLT_PFSYNC = 0x79 - DLT_PKTAP = 0x102 - DLT_PPI = 0xc0 - DLT_PPP = 0x9 - DLT_PPP_BSDOS = 0xe - DLT_PPP_ETHER = 0x33 - DLT_PPP_PPPD = 0xa6 - DLT_PPP_SERIAL = 0x32 - DLT_PPP_WITH_DIR = 0xcc - DLT_PPP_WITH_DIRECTION = 0xa6 - DLT_PRISM_HEADER = 0x77 - DLT_PROFIBUS_DL = 0x101 - DLT_PRONET = 0x4 - DLT_RAIF1 = 0xc6 - DLT_RAW = 0xc - DLT_RDS = 0x109 - DLT_REDBACK_SMARTEDGE = 0x20 - DLT_RIO = 0x7c - DLT_RTAC_SERIAL = 0xfa - DLT_SCCP = 0x8e - DLT_SCTP = 0xf8 - DLT_SITA = 0xc4 - DLT_SLIP = 0x8 - DLT_SLIP_BSDOS = 0xd - DLT_STANAG_5066_D_PDU = 0xed - DLT_SUNATM = 0x7b - DLT_SYMANTEC_FIREWALL = 0x63 - DLT_TZSP = 0x80 - DLT_USB = 0xba - DLT_USBPCAP = 0xf9 - DLT_USB_FREEBSD = 0xba - DLT_USB_LINUX = 0xbd - DLT_USB_LINUX_MMAPPED = 0xdc - DLT_USER0 = 0x93 - DLT_USER1 = 0x94 - DLT_USER10 = 0x9d - DLT_USER11 = 0x9e - DLT_USER12 = 0x9f - DLT_USER13 = 0xa0 - DLT_USER14 = 0xa1 - DLT_USER15 = 0xa2 - DLT_USER2 = 0x95 - DLT_USER3 = 0x96 - DLT_USER4 = 0x97 - DLT_USER5 = 0x98 - DLT_USER6 = 0x99 - DLT_USER7 = 0x9a - DLT_USER8 = 0x9b - DLT_USER9 = 0x9c - DLT_WATTSTOPPER_DLM = 0x107 - DLT_WIHART = 0xdf - DLT_WIRESHARK_UPPER_PDU = 0xfc - DLT_X2E_SERIAL = 0xd5 - DLT_X2E_XORAYA = 0xd6 - DLT_ZWAVE_R1_R2 = 0x105 - DLT_ZWAVE_R3 = 0x106 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x40 - ECHOE = 0x2 - ECHOK = 0x4 - ECHOKE = 0x1 - ECHONL = 0x10 - ECHOPRT = 0x20 - EVFILT_AIO = -0x3 - EVFILT_FS = -0x9 - EVFILT_LIO = -0xa - EVFILT_PROC = -0x5 - EVFILT_PROCDESC = -0x8 - EVFILT_READ = -0x1 - EVFILT_SENDFILE = -0xc - EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xc - EVFILT_TIMER = -0x7 - EVFILT_USER = -0xb - EVFILT_VNODE = -0x4 - EVFILT_WRITE = -0x2 - EV_ADD = 0x1 - EV_CLEAR = 0x20 - EV_DELETE = 0x2 - EV_DISABLE = 0x8 - EV_DISPATCH = 0x80 - EV_DROP = 0x1000 - EV_ENABLE = 0x4 - EV_EOF = 0x8000 - EV_ERROR = 0x4000 - EV_FLAG1 = 0x2000 - EV_FLAG2 = 0x4000 - EV_FORCEONESHOT = 0x100 - EV_ONESHOT = 0x10 - EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 - EXTA = 0x4b00 - EXTATTR_NAMESPACE_EMPTY = 0x0 - EXTATTR_NAMESPACE_SYSTEM = 0x2 - EXTATTR_NAMESPACE_USER = 0x1 - EXTB = 0x9600 - EXTPROC = 0x800 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FLUSHO = 0x800000 - F_CANCEL = 0x5 - F_DUP2FD = 0xa - F_DUP2FD_CLOEXEC = 0x12 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x11 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLK = 0xb - F_GETOWN = 0x5 - F_OGETLK = 0x7 - F_OK = 0x0 - F_OSETLK = 0x8 - F_OSETLKW = 0x9 - F_RDAHEAD = 0x10 - F_RDLCK = 0x1 - F_READAHEAD = 0xf - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLK = 
0xc - F_SETLKW = 0xd - F_SETLK_REMOTE = 0xe - F_SETOWN = 0x6 - F_UNLCK = 0x2 - F_UNLCKSYS = 0x4 - F_WRLCK = 0x3 - HUPCL = 0x4000 - ICANON = 0x100 - ICMP6_FILTER = 0x12 - ICRNL = 0x100 - IEXTEN = 0x400 - IFAN_ARRIVAL = 0x0 - IFAN_DEPARTURE = 0x1 - IFF_ALLMULTI = 0x200 - IFF_ALTPHYS = 0x4000 - IFF_BROADCAST = 0x2 - IFF_CANTCHANGE = 0x218f52 - IFF_CANTCONFIG = 0x10000 - IFF_DEBUG = 0x4 - IFF_DRV_OACTIVE = 0x400 - IFF_DRV_RUNNING = 0x40 - IFF_DYING = 0x200000 - IFF_LINK0 = 0x1000 - IFF_LINK1 = 0x2000 - IFF_LINK2 = 0x4000 - IFF_LOOPBACK = 0x8 - IFF_MONITOR = 0x40000 - IFF_MULTICAST = 0x8000 - IFF_NOARP = 0x80 - IFF_OACTIVE = 0x400 - IFF_POINTOPOINT = 0x10 - IFF_PPROMISC = 0x20000 - IFF_PROMISC = 0x100 - IFF_RENAMING = 0x400000 - IFF_RUNNING = 0x40 - IFF_SIMPLEX = 0x800 - IFF_STATICARP = 0x80000 - IFF_UP = 0x1 - IFNAMSIZ = 0x10 - IFT_BRIDGE = 0xd1 - IFT_CARP = 0xf8 - IFT_IEEE1394 = 0x90 - IFT_INFINIBAND = 0xc7 - IFT_L2VLAN = 0x87 - IFT_L3IPVLAN = 0x88 - IFT_PPP = 0x17 - IFT_PROPVIRTUAL = 0x35 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLASSD_HOST = 0xfffffff - IN_CLASSD_NET = 0xf0000000 - IN_CLASSD_NSHIFT = 0x1c - IN_LOOPBACKNET = 0x7f - IN_RFC3021_MASK = 0xfffffffe - IPPROTO_3PC = 0x22 - IPPROTO_ADFS = 0x44 - IPPROTO_AH = 0x33 - IPPROTO_AHIP = 0x3d - IPPROTO_APES = 0x63 - IPPROTO_ARGUS = 0xd - IPPROTO_AX25 = 0x5d - IPPROTO_BHA = 0x31 - IPPROTO_BLT = 0x1e - IPPROTO_BRSATMON = 0x4c - IPPROTO_CARP = 0x70 - IPPROTO_CFTP = 0x3e - IPPROTO_CHAOS = 0x10 - IPPROTO_CMTP = 0x26 - IPPROTO_CPHB = 0x49 - IPPROTO_CPNX = 0x48 - IPPROTO_DDP = 0x25 - IPPROTO_DGP = 0x56 - IPPROTO_DIVERT = 0x102 - IPPROTO_DONE = 0x101 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_EMCON = 0xe - IPPROTO_ENCAP = 0x62 - IPPROTO_EON = 0x50 - IPPROTO_ESP = 0x32 - IPPROTO_ETHERIP = 0x61 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GGP = 0x3 - IPPROTO_GMTP = 0x64 - IPPROTO_GRE = 0x2f - IPPROTO_HELLO = 0x3f - IPPROTO_HIP = 0x8b - IPPROTO_HMP = 0x14 - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IDPR = 0x23 - IPPROTO_IDRP = 0x2d - IPPROTO_IGMP = 0x2 - IPPROTO_IGP = 0x55 - IPPROTO_IGRP = 0x58 - IPPROTO_IL = 0x28 - IPPROTO_INLSP = 0x34 - IPPROTO_INP = 0x20 - IPPROTO_IP = 0x0 - IPPROTO_IPCOMP = 0x6c - IPPROTO_IPCV = 0x47 - IPPROTO_IPEIP = 0x5e - IPPROTO_IPIP = 0x4 - IPPROTO_IPPC = 0x43 - IPPROTO_IPV4 = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_IRTP = 0x1c - IPPROTO_KRYPTOLAN = 0x41 - IPPROTO_LARP = 0x5b - IPPROTO_LEAF1 = 0x19 - IPPROTO_LEAF2 = 0x1a - IPPROTO_MAX = 0x100 - IPPROTO_MEAS = 0x13 - IPPROTO_MH = 0x87 - IPPROTO_MHRP = 0x30 - IPPROTO_MICP = 0x5f - IPPROTO_MOBILE = 0x37 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_MUX = 0x12 - IPPROTO_ND = 0x4d - IPPROTO_NHRP = 0x36 - IPPROTO_NONE = 0x3b - IPPROTO_NSP = 0x1f - IPPROTO_NVPII = 0xb - IPPROTO_OLD_DIVERT = 0xfe - IPPROTO_OSPFIGP = 0x59 - IPPROTO_PFSYNC = 0xf0 - IPPROTO_PGM = 0x71 - IPPROTO_PIGP = 0x9 - IPPROTO_PIM = 0x67 - IPPROTO_PRM = 0x15 - IPPROTO_PUP = 0xc - IPPROTO_PVP = 0x4b - IPPROTO_RAW = 0xff - IPPROTO_RCCMON = 0xa - IPPROTO_RDP = 0x1b - IPPROTO_RESERVED_253 = 0xfd - IPPROTO_RESERVED_254 = 0xfe - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_RVD = 0x42 - IPPROTO_SATEXPAK = 
0x40 - IPPROTO_SATMON = 0x45 - IPPROTO_SCCSP = 0x60 - IPPROTO_SCTP = 0x84 - IPPROTO_SDRP = 0x2a - IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 - IPPROTO_SHIM6 = 0x8c - IPPROTO_SKIP = 0x39 - IPPROTO_SPACER = 0x7fff - IPPROTO_SRPC = 0x5a - IPPROTO_ST = 0x7 - IPPROTO_SVMTP = 0x52 - IPPROTO_SWIPE = 0x35 - IPPROTO_TCF = 0x57 - IPPROTO_TCP = 0x6 - IPPROTO_TLSP = 0x38 - IPPROTO_TP = 0x1d - IPPROTO_TPXX = 0x27 - IPPROTO_TRUNK1 = 0x17 - IPPROTO_TRUNK2 = 0x18 - IPPROTO_TTP = 0x54 - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPPROTO_VINES = 0x53 - IPPROTO_VISA = 0x46 - IPPROTO_VMTP = 0x51 - IPPROTO_WBEXPAK = 0x4f - IPPROTO_WBMON = 0x4e - IPPROTO_WSN = 0x4a - IPPROTO_XNET = 0xf - IPPROTO_XTP = 0x24 - IPV6_AUTOFLOWLABEL = 0x3b - IPV6_BINDANY = 0x40 - IPV6_BINDMULTI = 0x41 - IPV6_BINDV6ONLY = 0x1b - IPV6_CHECKSUM = 0x1a - IPV6_DEFAULT_MULTICAST_HOPS = 0x1 - IPV6_DEFAULT_MULTICAST_LOOP = 0x1 - IPV6_DEFHLIM = 0x40 - IPV6_DONTFRAG = 0x3e - IPV6_DSTOPTS = 0x32 - IPV6_FLOWID = 0x43 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 - IPV6_FLOWTYPE = 0x44 - IPV6_FRAGTTL = 0x78 - IPV6_FW_ADD = 0x1e - IPV6_FW_DEL = 0x1f - IPV6_FW_FLUSH = 0x20 - IPV6_FW_GET = 0x22 - IPV6_FW_ZERO = 0x21 - IPV6_HLIMDEC = 0x1 - IPV6_HOPLIMIT = 0x2f - IPV6_HOPOPTS = 0x31 - IPV6_IPSEC_POLICY = 0x1c - IPV6_JOIN_GROUP = 0xc - IPV6_LEAVE_GROUP = 0xd - IPV6_MAXHLIM = 0xff - IPV6_MAXOPTHDR = 0x800 - IPV6_MAXPACKET = 0xffff - IPV6_MAX_GROUP_SRC_FILTER = 0x200 - IPV6_MAX_MEMBERSHIPS = 0xfff - IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f - IPV6_MMTU = 0x500 - IPV6_MSFILTER = 0x4a - IPV6_MULTICAST_HOPS = 0xa - IPV6_MULTICAST_IF = 0x9 - IPV6_MULTICAST_LOOP = 0xb - IPV6_NEXTHOP = 0x30 - IPV6_PATHMTU = 0x2c - IPV6_PKTINFO = 0x2e - IPV6_PORTRANGE = 0xe - IPV6_PORTRANGE_DEFAULT = 0x0 - IPV6_PORTRANGE_HIGH = 0x1 - IPV6_PORTRANGE_LOW = 0x2 - IPV6_PREFER_TEMPADDR = 0x3f - IPV6_RECVDSTOPTS = 0x28 - IPV6_RECVFLOWID = 0x46 - IPV6_RECVHOPLIMIT = 0x25 - IPV6_RECVHOPOPTS = 0x27 - IPV6_RECVPATHMTU = 0x2b - IPV6_RECVPKTINFO = 0x24 - IPV6_RECVRSSBUCKETID = 0x47 - IPV6_RECVRTHDR = 0x26 - IPV6_RECVTCLASS = 0x39 - IPV6_RSSBUCKETID = 0x45 - IPV6_RSS_LISTEN_BUCKET = 0x42 - IPV6_RTHDR = 0x33 - IPV6_RTHDRDSTOPTS = 0x23 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_SOCKOPT_RESERVED1 = 0x3 - IPV6_TCLASS = 0x3d - IPV6_UNICAST_HOPS = 0x4 - IPV6_USE_MIN_MTU = 0x2a - IPV6_V6ONLY = 0x1b - IPV6_VERSION = 0x60 - IPV6_VERSION_MASK = 0xf0 - IP_ADD_MEMBERSHIP = 0xc - IP_ADD_SOURCE_MEMBERSHIP = 0x46 - IP_BINDANY = 0x18 - IP_BINDMULTI = 0x19 - IP_BLOCK_SOURCE = 0x48 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DONTFRAG = 0x43 - IP_DROP_MEMBERSHIP = 0xd - IP_DROP_SOURCE_MEMBERSHIP = 0x47 - IP_DUMMYNET3 = 0x31 - IP_DUMMYNET_CONFIGURE = 0x3c - IP_DUMMYNET_DEL = 0x3d - IP_DUMMYNET_FLUSH = 0x3e - IP_DUMMYNET_GET = 0x40 - IP_FLOWID = 0x5a - IP_FLOWTYPE = 0x5b - IP_FW3 = 0x30 - IP_FW_ADD = 0x32 - IP_FW_DEL = 0x33 - IP_FW_FLUSH = 0x34 - IP_FW_GET = 0x36 - IP_FW_NAT_CFG = 0x38 - IP_FW_NAT_DEL = 0x39 - IP_FW_NAT_GET_CONFIG = 0x3a - IP_FW_NAT_GET_LOG = 0x3b - IP_FW_RESETLOG = 0x37 - IP_FW_TABLE_ADD = 0x28 - IP_FW_TABLE_DEL = 0x29 - IP_FW_TABLE_FLUSH = 0x2a - IP_FW_TABLE_GETSIZE = 0x2b - IP_FW_TABLE_LIST = 0x2c - IP_FW_ZERO = 0x35 - IP_HDRINCL = 0x2 - IP_IPSEC_POLICY = 0x15 - IP_MAXPACKET = 0xffff - IP_MAX_GROUP_SRC_FILTER = 0x200 - IP_MAX_MEMBERSHIPS = 0xfff - IP_MAX_SOCK_MUTE_FILTER = 0x80 - IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MAX_SOURCE_FILTER = 0x400 - IP_MF = 0x2000 - 
IP_MINTTL = 0x42 - IP_MIN_MEMBERSHIPS = 0x1f - IP_MSFILTER = 0x4a - IP_MSS = 0x240 - IP_MULTICAST_IF = 0x9 - IP_MULTICAST_LOOP = 0xb - IP_MULTICAST_TTL = 0xa - IP_MULTICAST_VIF = 0xe - IP_OFFMASK = 0x1fff - IP_ONESBCAST = 0x17 - IP_OPTIONS = 0x1 - IP_PORTRANGE = 0x13 - IP_PORTRANGE_DEFAULT = 0x0 - IP_PORTRANGE_HIGH = 0x1 - IP_PORTRANGE_LOW = 0x2 - IP_RECVDSTADDR = 0x7 - IP_RECVFLOWID = 0x5d - IP_RECVIF = 0x14 - IP_RECVOPTS = 0x5 - IP_RECVRETOPTS = 0x6 - IP_RECVRSSBUCKETID = 0x5e - IP_RECVTOS = 0x44 - IP_RECVTTL = 0x41 - IP_RETOPTS = 0x8 - IP_RF = 0x8000 - IP_RSSBUCKETID = 0x5c - IP_RSS_LISTEN_BUCKET = 0x1a - IP_RSVP_OFF = 0x10 - IP_RSVP_ON = 0xf - IP_RSVP_VIF_OFF = 0x12 - IP_RSVP_VIF_ON = 0x11 - IP_SENDSRCADDR = 0x7 - IP_TOS = 0x3 - IP_TTL = 0x4 - IP_UNBLOCK_SOURCE = 0x49 - ISIG = 0x80 - ISTRIP = 0x20 - IXANY = 0x800 - IXOFF = 0x400 - IXON = 0x200 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - MADV_AUTOSYNC = 0x7 - MADV_CORE = 0x9 - MADV_DONTNEED = 0x4 - MADV_FREE = 0x5 - MADV_NOCORE = 0x8 - MADV_NORMAL = 0x0 - MADV_NOSYNC = 0x6 - MADV_PROTECT = 0xa - MADV_RANDOM = 0x1 - MADV_SEQUENTIAL = 0x2 - MADV_WILLNEED = 0x3 - MAP_ALIGNED_SUPER = 0x1000000 - MAP_ALIGNMENT_MASK = -0x1000000 - MAP_ALIGNMENT_SHIFT = 0x18 - MAP_ANON = 0x1000 - MAP_ANONYMOUS = 0x1000 - MAP_COPY = 0x2 - MAP_EXCL = 0x4000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_GUARD = 0x2000 - MAP_HASSEMAPHORE = 0x200 - MAP_NOCORE = 0x20000 - MAP_NOSYNC = 0x800 - MAP_PREFAULT_READ = 0x40000 - MAP_PRIVATE = 0x2 - MAP_RESERVED0020 = 0x20 - MAP_RESERVED0040 = 0x40 - MAP_RESERVED0080 = 0x80 - MAP_RESERVED0100 = 0x100 - MAP_SHARED = 0x1 - MAP_STACK = 0x400 - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MSG_CMSG_CLOEXEC = 0x40000 - MSG_COMPAT = 0x8000 - MSG_CTRUNC = 0x20 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x80 - MSG_EOF = 0x100 - MSG_EOR = 0x8 - MSG_NBIO = 0x4000 - MSG_NOSIGNAL = 0x20000 - MSG_NOTIFICATION = 0x2000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_TRUNC = 0x10 - MSG_WAITALL = 0x40 - MSG_WAITFORONE = 0x80000 - MS_ASYNC = 0x1 - MS_INVALIDATE = 0x2 - MS_SYNC = 0x0 - NAME_MAX = 0xff - NET_RT_DUMP = 0x1 - NET_RT_FLAGS = 0x2 - NET_RT_IFLIST = 0x3 - NET_RT_IFLISTL = 0x5 - NET_RT_IFMALIST = 0x4 - NOFLSH = 0x80000000 - NOKERNINFO = 0x2000000 - NOTE_ATTRIB = 0x8 - NOTE_CHILD = 0x4 - NOTE_CLOSE = 0x100 - NOTE_CLOSE_WRITE = 0x200 - NOTE_DELETE = 0x1 - NOTE_EXEC = 0x20000000 - NOTE_EXIT = 0x80000000 - NOTE_EXTEND = 0x4 - NOTE_FFAND = 0x40000000 - NOTE_FFCOPY = 0xc0000000 - NOTE_FFCTRLMASK = 0xc0000000 - NOTE_FFLAGSMASK = 0xffffff - NOTE_FFNOP = 0x0 - NOTE_FFOR = 0x80000000 - NOTE_FILE_POLL = 0x2 - NOTE_FORK = 0x40000000 - NOTE_LINK = 0x10 - NOTE_LOWAT = 0x1 - NOTE_MSECONDS = 0x2 - NOTE_NSECONDS = 0x8 - NOTE_OPEN = 0x80 - NOTE_PCTRLMASK = 0xf0000000 - NOTE_PDATAMASK = 0xfffff - NOTE_READ = 0x400 - NOTE_RENAME = 0x20 - NOTE_REVOKE = 0x40 - NOTE_SECONDS = 0x1 - NOTE_TRACK = 0x1 - NOTE_TRACKERR = 0x2 - NOTE_TRIGGER = 0x1000000 - NOTE_USECONDS = 0x4 - NOTE_WRITE = 0x2 - OCRNL = 0x10 - ONLCR = 0x2 - ONLRET = 0x40 - ONOCR = 0x20 - ONOEOT = 0x8 - OPOST = 0x1 - OXTABS = 0x4 - O_ACCMODE = 0x3 - O_APPEND = 0x8 - O_ASYNC = 0x40 - O_CLOEXEC = 0x100000 - O_CREAT = 0x200 - O_DIRECT = 0x10000 - O_DIRECTORY = 0x20000 - O_EXCL = 0x800 - O_EXEC = 0x40000 - O_EXLOCK = 0x20 - O_FSYNC = 0x80 - O_NDELAY = 0x4 - O_NOCTTY = 0x8000 - O_NOFOLLOW = 0x100 - O_NONBLOCK = 0x4 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_SHLOCK = 0x10 - O_SYNC = 0x80 - O_TRUNC = 0x400 - O_TTY_INIT = 0x80000 - O_VERIFY = 0x200000 - O_WRONLY = 0x1 - PARENB = 0x1000 - PARMRK = 0x8 - PARODD = 
0x2000 - PENDIN = 0x20000000 - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROT_EXEC = 0x4 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - RLIMIT_AS = 0xa - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_MEMLOCK = 0x6 - RLIMIT_NOFILE = 0x8 - RLIMIT_NPROC = 0x7 - RLIMIT_RSS = 0x5 - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0x7fffffffffffffff - RTAX_AUTHOR = 0x6 - RTAX_BRD = 0x7 - RTAX_DST = 0x0 - RTAX_GATEWAY = 0x1 - RTAX_GENMASK = 0x3 - RTAX_IFA = 0x5 - RTAX_IFP = 0x4 - RTAX_MAX = 0x8 - RTAX_NETMASK = 0x2 - RTA_AUTHOR = 0x40 - RTA_BRD = 0x80 - RTA_DST = 0x1 - RTA_GATEWAY = 0x2 - RTA_GENMASK = 0x8 - RTA_IFA = 0x20 - RTA_IFP = 0x10 - RTA_NETMASK = 0x4 - RTF_BLACKHOLE = 0x1000 - RTF_BROADCAST = 0x400000 - RTF_DONE = 0x40 - RTF_DYNAMIC = 0x10 - RTF_FIXEDMTU = 0x80000 - RTF_FMASK = 0x1004d808 - RTF_GATEWAY = 0x2 - RTF_GWFLAG_COMPAT = 0x80000000 - RTF_HOST = 0x4 - RTF_LLDATA = 0x400 - RTF_LLINFO = 0x400 - RTF_LOCAL = 0x200000 - RTF_MODIFIED = 0x20 - RTF_MULTICAST = 0x800000 - RTF_PINNED = 0x100000 - RTF_PROTO1 = 0x8000 - RTF_PROTO2 = 0x4000 - RTF_PROTO3 = 0x40000 - RTF_REJECT = 0x8 - RTF_RNH_LOCKED = 0x40000000 - RTF_STATIC = 0x800 - RTF_STICKY = 0x10000000 - RTF_UP = 0x1 - RTF_XRESOLVE = 0x200 - RTM_ADD = 0x1 - RTM_CHANGE = 0x3 - RTM_DELADDR = 0xd - RTM_DELETE = 0x2 - RTM_DELMADDR = 0x10 - RTM_GET = 0x4 - RTM_IEEE80211 = 0x12 - RTM_IFANNOUNCE = 0x11 - RTM_IFINFO = 0xe - RTM_LOCK = 0x8 - RTM_LOSING = 0x5 - RTM_MISS = 0x7 - RTM_NEWADDR = 0xc - RTM_NEWMADDR = 0xf - RTM_REDIRECT = 0x6 - RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 - RTM_VERSION = 0x5 - RTV_EXPIRE = 0x4 - RTV_HOPCOUNT = 0x2 - RTV_MTU = 0x1 - RTV_RPIPE = 0x8 - RTV_RTT = 0x40 - RTV_RTTVAR = 0x80 - RTV_SPIPE = 0x10 - RTV_SSTHRESH = 0x20 - RTV_WEIGHT = 0x100 - RT_ALL_FIBS = -0x1 - RT_BLACKHOLE = 0x40 - RT_CACHING_CONTEXT = 0x1 - RT_DEFAULT_FIB = 0x0 - RT_HAS_GW = 0x80 - RT_HAS_HEADER = 0x10 - RT_HAS_HEADER_BIT = 0x4 - RT_L2_ME = 0x4 - RT_L2_ME_BIT = 0x2 - RT_LLE_CACHE = 0x100 - RT_MAY_LOOP = 0x8 - RT_MAY_LOOP_BIT = 0x3 - RT_NORTREF = 0x2 - RT_REJECT = 0x20 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_BINTIME = 0x4 - SCM_CREDS = 0x3 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x2 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDMULTI = 0x80206931 - SIOCAIFADDR = 0x8040691a - SIOCAIFGROUP = 0x80246987 - SIOCATMARK = 0x40047307 - SIOCDELMULTI = 0x80206932 - SIOCDIFADDR = 0x80206919 - SIOCDIFGROUP = 0x80246989 - SIOCDIFPHYADDR = 0x80206949 - SIOCGDRVSPEC = 0xc01c697b - SIOCGETSGCNT = 0xc0147210 - SIOCGETVIFCNT = 0xc014720f - SIOCGHIWAT = 0x40047301 - SIOCGHWADDR = 0xc020693e - SIOCGI2C = 0xc020693d - SIOCGIFADDR = 0xc0206921 - SIOCGIFBRDADDR = 0xc0206923 - SIOCGIFCAP = 0xc020691f - SIOCGIFCONF = 0xc0086924 - SIOCGIFDESCR = 0xc020692a - SIOCGIFDSTADDR = 0xc0206922 - SIOCGIFFIB = 0xc020695c - SIOCGIFFLAGS = 0xc0206911 - SIOCGIFGENERIC = 0xc020693a - SIOCGIFGMEMB = 0xc024698a - SIOCGIFGROUP = 0xc0246988 - SIOCGIFINDEX = 0xc0206920 - SIOCGIFMAC = 0xc0206926 - SIOCGIFMEDIA = 0xc0286938 - SIOCGIFMETRIC = 0xc0206917 - SIOCGIFMTU = 0xc0206933 - SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206948 - SIOCGIFPHYS = 0xc0206935 - SIOCGIFPSRCADDR = 0xc0206947 - SIOCGIFSTATUS = 0xc331693b - SIOCGIFXMEDIA = 0xc028698b - SIOCGLOWAT = 0x40047303 - SIOCGPGRP = 0x40047309 - SIOCGPRIVATE_0 = 0xc0206950 - SIOCGPRIVATE_1 = 0xc0206951 - SIOCGTUNFIB = 0xc020695e - SIOCIFCREATE = 0xc020697a - SIOCIFCREATE2 = 0xc020697c - SIOCIFDESTROY = 0x80206979 - SIOCIFGCLONERS = 0xc00c6978 
- SIOCSDRVSPEC = 0x801c697b - SIOCSHIWAT = 0x80047300 - SIOCSIFADDR = 0x8020690c - SIOCSIFBRDADDR = 0x80206913 - SIOCSIFCAP = 0x8020691e - SIOCSIFDESCR = 0x80206929 - SIOCSIFDSTADDR = 0x8020690e - SIOCSIFFIB = 0x8020695d - SIOCSIFFLAGS = 0x80206910 - SIOCSIFGENERIC = 0x80206939 - SIOCSIFLLADDR = 0x8020693c - SIOCSIFMAC = 0x80206927 - SIOCSIFMEDIA = 0xc0206937 - SIOCSIFMETRIC = 0x80206918 - SIOCSIFMTU = 0x80206934 - SIOCSIFNAME = 0x80206928 - SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x80406946 - SIOCSIFPHYS = 0x80206936 - SIOCSIFRVNET = 0xc020695b - SIOCSIFVNET = 0xc020695a - SIOCSLOWAT = 0x80047302 - SIOCSPGRP = 0x80047308 - SIOCSTUNFIB = 0x8020695f - SOCK_CLOEXEC = 0x10000000 - SOCK_DGRAM = 0x2 - SOCK_MAXADDRLEN = 0xff - SOCK_NONBLOCK = 0x20000000 - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_SOCKET = 0xffff - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x2 - SO_ACCEPTFILTER = 0x1000 - SO_BINTIME = 0x2000 - SO_BROADCAST = 0x20 - SO_DEBUG = 0x1 - SO_DONTROUTE = 0x10 - SO_ERROR = 0x1007 - SO_KEEPALIVE = 0x8 - SO_LABEL = 0x1009 - SO_LINGER = 0x80 - SO_LISTENINCQLEN = 0x1013 - SO_LISTENQLEN = 0x1012 - SO_LISTENQLIMIT = 0x1011 - SO_NOSIGPIPE = 0x800 - SO_NO_DDP = 0x8000 - SO_NO_OFFLOAD = 0x4000 - SO_OOBINLINE = 0x100 - SO_PEERLABEL = 0x1010 - SO_PROTOCOL = 0x1016 - SO_PROTOTYPE = 0x1016 - SO_RCVBUF = 0x1002 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_SETFIB = 0x1014 - SO_SNDBUF = 0x1001 - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_TIMESTAMP = 0x400 - SO_TYPE = 0x1008 - SO_USELOOPBACK = 0x40 - SO_USER_COOKIE = 0x1015 - SO_VENDOR = 0x80000000 - TAB0 = 0x0 - TAB3 = 0x4 - TABDLY = 0x4 - TCIFLUSH = 0x1 - TCIOFF = 0x3 - TCIOFLUSH = 0x3 - TCION = 0x4 - TCOFLUSH = 0x2 - TCOOFF = 0x1 - TCOON = 0x2 - TCP_CA_NAME_MAX = 0x10 - TCP_CCALGOOPT = 0x41 - TCP_CONGESTION = 0x40 - TCP_FASTOPEN = 0x401 - TCP_FUNCTION_BLK = 0x2000 - TCP_FUNCTION_NAME_LEN_MAX = 0x20 - TCP_INFO = 0x20 - TCP_KEEPCNT = 0x400 - TCP_KEEPIDLE = 0x100 - TCP_KEEPINIT = 0x80 - TCP_KEEPINTVL = 0x200 - TCP_MAXBURST = 0x4 - TCP_MAXHLEN = 0x3c - TCP_MAXOLEN = 0x28 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_SACK = 0x4 - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0x10 - TCP_MINMSS = 0xd8 - TCP_MSS = 0x218 - TCP_NODELAY = 0x1 - TCP_NOOPT = 0x8 - TCP_NOPUSH = 0x4 - TCP_PCAP_IN = 0x1000 - TCP_PCAP_OUT = 0x800 - TCP_VENDOR = 0x80000000 - TCSAFLUSH = 0x2 - TIOCCBRK = 0x2000747a - TIOCCDTR = 0x20007478 - TIOCCONS = 0x80047462 - TIOCDRAIN = 0x2000745e - TIOCEXCL = 0x2000740d - TIOCEXT = 0x80047460 - TIOCFLUSH = 0x80047410 - TIOCGDRAINWAIT = 0x40047456 - TIOCGETA = 0x402c7413 - TIOCGETD = 0x4004741a - TIOCGPGRP = 0x40047477 - TIOCGPTN = 0x4004740f - TIOCGSID = 0x40047463 - TIOCGWINSZ = 0x40087468 - TIOCMBIC = 0x8004746b - TIOCMBIS = 0x8004746c - TIOCMGDTRWAIT = 0x4004745a - TIOCMGET = 0x4004746a - TIOCMSDTRWAIT = 0x8004745b - TIOCMSET = 0x8004746d - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DCD = 0x40 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x20007471 - TIOCNXCL = 0x2000740e - TIOCOUTQ = 0x40047473 - TIOCPKT = 0x80047470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCPTMASTER = 0x2000741c - TIOCSBRK = 0x2000747b - TIOCSCTTY = 0x20007461 - TIOCSDRAINWAIT = 0x80047457 - TIOCSDTR = 
0x20007479 - TIOCSETA = 0x802c7414 - TIOCSETAF = 0x802c7416 - TIOCSETAW = 0x802c7415 - TIOCSETD = 0x8004741b - TIOCSIG = 0x2004745f - TIOCSPGRP = 0x80047476 - TIOCSTART = 0x2000746e - TIOCSTAT = 0x20007465 - TIOCSTI = 0x80017472 - TIOCSTOP = 0x2000746f - TIOCSWINSZ = 0x80087467 - TIOCTIMESTAMP = 0x40107459 - TIOCUCNTL = 0x80047466 - TOSTOP = 0x400000 - VDISCARD = 0xf - VDSUSP = 0xb - VEOF = 0x0 - VEOL = 0x1 - VEOL2 = 0x2 - VERASE = 0x3 - VERASE2 = 0x7 - VINTR = 0x8 - VKILL = 0x5 - VLNEXT = 0xe - VMIN = 0x10 - VQUIT = 0x9 - VREPRINT = 0x6 - VSTART = 0xc - VSTATUS = 0x12 - VSTOP = 0xd - VSUSP = 0xa - VTIME = 0x11 - VWERASE = 0x4 - WCONTINUED = 0x4 - WCOREFLAG = 0x80 - WEXITED = 0x10 - WLINUXCLONE = 0x80000000 - WNOHANG = 0x1 - WNOWAIT = 0x8 - WSTOPPED = 0x2 - WTRAPPED = 0x20 - WUNTRACED = 0x2 + AF_APPLETALK = 0x10 + AF_ARP = 0x23 + AF_ATM = 0x1e + AF_BLUETOOTH = 0x24 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1c + AF_INET6_SDP = 0x2a + AF_INET_SDP = 0x28 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x2a + AF_NATM = 0x1d + AF_NETBIOS = 0x6 + AF_NETGRAPH = 0x20 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SCLUSTER = 0x22 + AF_SIP = 0x18 + AF_SLOW = 0x21 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VENDOR00 = 0x27 + AF_VENDOR01 = 0x29 + AF_VENDOR02 = 0x2b + AF_VENDOR03 = 0x2d + AF_VENDOR04 = 0x2f + AF_VENDOR05 = 0x31 + AF_VENDOR06 = 0x33 + AF_VENDOR07 = 0x35 + AF_VENDOR08 = 0x37 + AF_VENDOR09 = 0x39 + AF_VENDOR10 = 0x3b + AF_VENDOR11 = 0x3d + AF_VENDOR12 = 0x3f + AF_VENDOR13 = 0x41 + AF_VENDOR14 = 0x43 + AF_VENDOR15 = 0x45 + AF_VENDOR16 = 0x47 + AF_VENDOR17 = 0x49 + AF_VENDOR18 = 0x4b + AF_VENDOR19 = 0x4d + AF_VENDOR20 = 0x4f + AF_VENDOR21 = 0x51 + AF_VENDOR22 = 0x53 + AF_VENDOR23 = 0x55 + AF_VENDOR24 = 0x57 + AF_VENDOR25 = 0x59 + AF_VENDOR26 = 0x5b + AF_VENDOR27 = 0x5d + AF_VENDOR28 = 0x5f + AF_VENDOR29 = 0x61 + AF_VENDOR30 = 0x63 + AF_VENDOR31 = 0x65 + AF_VENDOR32 = 0x67 + AF_VENDOR33 = 0x69 + AF_VENDOR34 = 0x6b + AF_VENDOR35 = 0x6d + AF_VENDOR36 = 0x6f + AF_VENDOR37 = 0x71 + AF_VENDOR38 = 0x73 + AF_VENDOR39 = 0x75 + AF_VENDOR40 = 0x77 + AF_VENDOR41 = 0x79 + AF_VENDOR42 = 0x7b + AF_VENDOR43 = 0x7d + AF_VENDOR44 = 0x7f + AF_VENDOR45 = 0x81 + AF_VENDOR46 = 0x83 + AF_VENDOR47 = 0x85 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427c + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRECTION = 0x40044276 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0084279 + BIOCGETBUFMODE = 0x4004427d + BIOCGETIF = 0x4020426b + BIOCGETZMAX = 0x4004427f + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4008426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCGTSTAMP = 0x40044283 + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x2000427a + BIOCPROMISC = 0x20004269 + BIOCROTZBUF = 0x400c4280 + BIOCSBLEN = 0xc0044266 + BIOCSDIRECTION = 0x80044277 + BIOCSDLT = 0x80044278 + BIOCSETBUFMODE = 0x8004427e + BIOCSETF = 0x80084267 + 
BIOCSETFNR = 0x80084282 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x8008427b + BIOCSETZBUF = 0x800c4281 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8008426d + BIOCSSEESENT = 0x80044277 + BIOCSTSTAMP = 0x80044284 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_BUFMODE_BUFFER = 0x1 + BPF_BUFMODE_ZBUF = 0x2 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_T_BINTIME = 0x2 + BPF_T_BINTIME_FAST = 0x102 + BPF_T_BINTIME_MONOTONIC = 0x202 + BPF_T_BINTIME_MONOTONIC_FAST = 0x302 + BPF_T_FAST = 0x100 + BPF_T_FLAG_MASK = 0x300 + BPF_T_FORMAT_MASK = 0x3 + BPF_T_MICROTIME = 0x0 + BPF_T_MICROTIME_FAST = 0x100 + BPF_T_MICROTIME_MONOTONIC = 0x200 + BPF_T_MICROTIME_MONOTONIC_FAST = 0x300 + BPF_T_MONOTONIC = 0x200 + BPF_T_MONOTONIC_FAST = 0x300 + BPF_T_NANOTIME = 0x1 + BPF_T_NANOTIME_FAST = 0x101 + BPF_T_NANOTIME_MONOTONIC = 0x201 + BPF_T_NANOTIME_MONOTONIC_FAST = 0x301 + BPF_T_NONE = 0x3 + BPF_T_NORMAL = 0x0 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CREAD = 0x800 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_MAXNAME = 0x18 + CTL_NET = 0x4 + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DOCSIS = 0x8f + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_HHDLC = 0x79 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_IPFILTER = 0x74 + DLT_IPMB = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + 
DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LOOP = 0x6c + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0xf6 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NULL = 0x0 + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x79 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RIO = 0x7c + DLT_SCCP = 0x8e + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_WIHART = 0xdf + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EVFILT_AIO = -0x3 + EVFILT_FS = -0x9 + EVFILT_LIO = -0xa + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xb + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xb + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DROP = 0x1000 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTATTR_NAMESPACE_EMPTY = 0x0 + EXTATTR_NAMESPACE_SYSTEM = 0x2 + EXTATTR_NAMESPACE_USER = 0x1 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_CANCEL = 0x5 + F_DUP2FD = 0xa + F_DUP2FD_CLOEXEC = 0x12 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x11 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0xb + F_GETOWN = 0x5 + F_OGETLK = 0x7 + F_OK = 0x0 + F_OSETLK = 0x8 + F_OSETLKW = 0x9 + F_RDAHEAD = 0x10 + F_RDLCK = 0x1 + F_READAHEAD = 0xf + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0xc + F_SETLKW = 0xd + F_SETLK_REMOTE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_UNLCKSYS = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 
0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x218f72 + IFF_CANTCONFIG = 0x10000 + IFF_DEBUG = 0x4 + IFF_DRV_OACTIVE = 0x400 + IFF_DRV_RUNNING = 0x40 + IFF_DYING = 0x200000 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MONITOR = 0x40000 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PPROMISC = 0x20000 + IFF_PROMISC = 0x100 + IFF_RENAMING = 0x400000 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_SMART = 0x20 + IFF_STATICARP = 0x80000 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf8 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf2 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_IPXIP = 0xf9 + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + 
IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf6 + IFT_PFSYNC = 0xf7 + IFT_PLC = 0xae + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf1 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_STF = 0xd7 + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VOICEEM = 0x64 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_MASK = 0xfffffffe + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CARP = 0x70 + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HIP = 0x8b + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x34 + IPPROTO_MEAS = 0x13 + IPPROTO_MH = 0x87 + 
IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OLD_DIVERT = 0xfe + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_RESERVED_253 = 0xfd + IPPROTO_RESERVED_254 = 0xfe + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEND = 0x103 + IPPROTO_SEP = 0x21 + IPPROTO_SHIM6 = 0x8c + IPPROTO_SKIP = 0x39 + IPPROTO_SPACER = 0x7fff + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TLSP = 0x38 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDANY = 0x40 + IPV6_BINDV6ONLY = 0x1b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MIN_MEMBERSHIPS = 0x1f + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BINDANY = 0x18 + IP_BLOCK_SOURCE = 0x48 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DONTFRAG = 0x43 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET3 = 0x31 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FAITH = 0x16 + IP_FW3 = 0x30 + IP_FW_ADD = 0x32 + IP_FW_DEL = 0x33 + IP_FW_FLUSH = 0x34 + IP_FW_GET = 0x36 + IP_FW_NAT_CFG = 0x38 + IP_FW_NAT_DEL = 0x39 + IP_FW_NAT_GET_CONFIG = 0x3a + 
IP_FW_NAT_GET_LOG = 0x3b + IP_FW_RESETLOG = 0x37 + IP_FW_TABLE_ADD = 0x28 + IP_FW_TABLE_DEL = 0x29 + IP_FW_TABLE_FLUSH = 0x2a + IP_FW_TABLE_GETSIZE = 0x2b + IP_FW_TABLE_LIST = 0x2c + IP_FW_ZERO = 0x35 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MAX_SOURCE_FILTER = 0x400 + IP_MF = 0x2000 + IP_MINTTL = 0x42 + IP_MIN_MEMBERSHIPS = 0x1f + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_OFFMASK = 0x1fff + IP_ONESBCAST = 0x17 + IP_OPTIONS = 0x1 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVTOS = 0x44 + IP_RECVTTL = 0x41 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + ISIG = 0x80 + ISTRIP = 0x20 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_AUTOSYNC = 0x7 + MADV_CORE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_NOCORE = 0x8 + MADV_NORMAL = 0x0 + MADV_NOSYNC = 0x6 + MADV_PROTECT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MAP_ALIGNED_SUPER = 0x1000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_EXCL = 0x4000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_HASSEMAPHORE = 0x200 + MAP_NOCORE = 0x20000 + MAP_NORESERVE = 0x40 + MAP_NOSYNC = 0x800 + MAP_PREFAULT_READ = 0x40000 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x20 + MAP_RESERVED0080 = 0x80 + MAP_RESERVED0100 = 0x100 + MAP_SHARED = 0x1 + MAP_STACK = 0x400 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MSG_CMSG_CLOEXEC = 0x40000 + MSG_COMPAT = 0x8000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_NBIO = 0x4000 + MSG_NOSIGNAL = 0x20000 + MSG_NOTIFICATION = 0x2000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x0 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFLISTL = 0x5 + NET_RT_IFMALIST = 0x4 + NET_RT_MAXID = 0x6 + NOFLSH = 0x80000000 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x100000 + O_CREAT = 0x200 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x20000 + O_EXCL = 0x800 + O_EXEC = 0x40000 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_TTY_INIT = 0x80000 + O_WRONLY = 
0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x8 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x1004d808 + RTF_GATEWAY = 0x2 + RTF_GWFLAG_COMPAT = 0x80000000 + RTF_HOST = 0x4 + RTF_LLDATA = 0x400 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_PINNED = 0x100000 + RTF_PRCLONING = 0x10000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_RNH_LOCKED = 0x40000000 + RTF_STATIC = 0x800 + RTF_STICKY = 0x10000000 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x12 + RTM_IFANNOUNCE = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RTV_WEIGHT = 0x100 + RT_ALL_FIBS = -0x1 + RT_CACHING_CONTEXT = 0x1 + RT_DEFAULT_FIB = 0x0 + RT_NORTREF = 0x2 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_BINTIME = 0x4 + SCM_CREDS = 0x3 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCADDRT = 0x8030720a + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80246987 + SIOCALIFADDR = 0x8118691b + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80206932 + SIOCDELRT = 0x8030720b + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80246989 + SIOCDIFPHYADDR = 0x80206949 + SIOCDLIFADDR = 0x8118691d + SIOCGDRVSPEC = 0xc01c697b + SIOCGETSGCNT = 0xc0147210 + SIOCGETVIFCNT = 0xc014720f + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020691f + SIOCGIFCONF = 0xc0086924 + SIOCGIFDESCR = 0xc020692a + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFIB = 0xc020695c + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc024698a + SIOCGIFGROUP = 0xc0246988 + SIOCGIFINDEX = 0xc0206920 + SIOCGIFMAC = 0xc0206926 + SIOCGIFMEDIA = 0xc0286938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFSTATUS = 0xc331693b + SIOCGLIFADDR = 0xc118691c + SIOCGLIFPHYADDR = 0xc118694b + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGPRIVATE_0 = 0xc0206950 + SIOCGPRIVATE_1 = 0xc0206951 + SIOCIFCREATE = 0xc020697a + SIOCIFCREATE2 = 0xc020697c + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc00c6978 + SIOCSDRVSPEC = 0x801c697b + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 
0x8020691e + SIOCSIFDESCR = 0x80206929 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFIB = 0x8020695d + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206927 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNAME = 0x80206928 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPHYS = 0x80206936 + SIOCSIFRVNET = 0xc020695b + SIOCSIFVNET = 0xc020695a + SIOCSLIFPHYADDR = 0x8118694a + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_NONBLOCK = 0x20000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BINTIME = 0x2000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1009 + SO_LINGER = 0x80 + SO_LISTENINCQLEN = 0x1013 + SO_LISTENQLEN = 0x1012 + SO_LISTENQLIMIT = 0x1011 + SO_NOSIGPIPE = 0x800 + SO_NO_DDP = 0x8000 + SO_NO_OFFLOAD = 0x4000 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1010 + SO_PROTOCOL = 0x1016 + SO_PROTOTYPE = 0x1016 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SETFIB = 0x1014 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_USER_COOKIE = 0x1015 + SO_VENDOR = 0x80000000 + TCIFLUSH = 0x1 + TCIOFLUSH = 0x3 + TCOFLUSH = 0x2 + TCP_CA_NAME_MAX = 0x10 + TCP_CONGESTION = 0x40 + TCP_INFO = 0x20 + TCP_KEEPCNT = 0x400 + TCP_KEEPIDLE = 0x100 + TCP_KEEPINIT = 0x80 + TCP_KEEPINTVL = 0x200 + TCP_MAXBURST = 0x4 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_VENDOR = 0x80000000 + TCSAFLUSH = 0x2 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGPTN = 0x4004740f + TIOCGSID = 0x40047463 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DCD = 0x40 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMASTER = 0x2000741c + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40087459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + VDISCARD = 0xf + VDSUSP 
= 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VERASE2 = 0x7 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x4 + WCOREFLAG = 0x80 + WEXITED = 0x10 + WLINUXCLONE = 0x80000000 + WNOHANG = 0x1 + WNOWAIT = 0x8 + WSTOPPED = 0x2 + WTRAPPED = 0x20 + WUNTRACED = 0x2 ) // Errors diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 4066ad1e0f7..b536f67a00c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2c + AF_MAX = 0x2b AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,7 +51,6 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe - AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -130,7 +129,6 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x1008 @@ -327,9 +325,6 @@ const ( ECHOKE = 0x800 ECHONL = 0x40 ECHOPRT = 0x400 - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x800 - EFD_SEMAPHORE = 0x1 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -394,7 +389,6 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -456,8 +450,6 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -512,19 +504,6 @@ const ( F_ULOCK = 0x0 F_UNLCK = 0x2 F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 HUPCL = 0x400 @@ -662,10 +641,8 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -678,14 +655,12 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -696,10 +671,8 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -713,9 +686,7 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -758,7 +729,6 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 
0x7 @@ -796,7 +766,6 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -930,7 +899,6 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -945,7 +913,6 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -964,7 +931,6 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -985,10 +951,8 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1045,7 +1009,6 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1090,16 +1053,6 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x2401 - PERF_EVENT_IOC_ENABLE = 0x2400 - PERF_EVENT_IOC_ID = 0x80042407 - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 - PERF_EVENT_IOC_PERIOD = 0x40082404 - PERF_EVENT_IOC_REFRESH = 0x2402 - PERF_EVENT_IOC_RESET = 0x2403 - PERF_EVENT_IOC_SET_BPF = 0x40042408 - PERF_EVENT_IOC_SET_FILTER = 0x40042406 - PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1266,16 +1219,7 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x8 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd RLIMIT_NOFILE = 0x7 - RLIMIT_NPROC = 0x6 - RLIMIT_RSS = 0x5 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 RLIM_INFINITY = -0x1 RTAX_ADVMSS = 0x8 @@ -1302,7 +1246,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x19 RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1346,7 +1290,6 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1355,7 +1298,6 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1377,11 +1319,10 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x5f RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1396,8 +1337,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x14 + RTM_NR_MSGTYPES = 0x50 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1408,7 +1349,6 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1439,12 +1379,8 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 - 
SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1452,16 +1388,6 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1469,9 +1395,7 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1491,21 +1415,13 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 - SIOCINQ = 0x541b - SIOCOUTQ = 0x5411 - SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1524,15 +1440,11 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x800 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1578,7 +1490,6 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1587,13 +1498,11 @@ const ( SO_ERROR = 0x4 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x9 SO_LINGER = 0xd SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0xa @@ -1601,7 +1510,6 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 - SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1f SO_PRIORITY = 0xc @@ -1669,12 +1577,6 @@ const ( TAB2 = 0x1000 TAB3 = 0x1800 TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x8 TCFLSH = 0x540b TCGETA = 0x5405 TCGETS = 0x5401 @@ -1698,7 +1600,6 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1758,7 +1659,6 @@ const ( TIOCGPKT = 0x80045438 TIOCGPTLCK = 0x80045439 TIOCGPTN = 0x80045430 - TIOCGPTPEER = 0x5441 TIOCGRS485 = 0x542e TIOCGSERIAL = 0x541e TIOCGSID = 0x5429 @@ -1816,7 +1716,6 @@ const ( TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 TOSTOP = 0x100 - TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x400854d5 TUNDETACHFILTER = 0x400854d6 TUNGETFEATURES = 0x800454cf @@ -1841,7 +1740,6 @@ const ( TUNSETVNETBE = 0x400454de TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc - UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1878,8 +1776,6 @@ const ( WORDSIZE = 0x20 WSTOPPED = 0x2 WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 XCASE = 0x4 XTABS = 0x1800 ) @@ -2051,6 +1947,7 @@ const ( SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) + SIGUNUSED = syscall.Signal(0x1f) SIGURG = 
syscall.Signal(0x17) SIGUSR1 = syscall.Signal(0xa) SIGUSR2 = syscall.Signal(0xc) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c9f53b0b37f..e8b23353335 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2c + AF_MAX = 0x2b AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,7 +51,6 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe - AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -130,7 +129,6 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x1008 @@ -327,9 +325,6 @@ const ( ECHOKE = 0x800 ECHONL = 0x40 ECHOPRT = 0x400 - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x800 - EFD_SEMAPHORE = 0x1 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -394,7 +389,6 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -456,8 +450,6 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -512,19 +504,6 @@ const ( F_ULOCK = 0x0 F_UNLCK = 0x2 F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 HUPCL = 0x400 @@ -662,10 +641,8 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -678,14 +655,12 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -696,10 +671,8 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -713,9 +686,7 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -758,7 +729,6 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -796,7 +766,6 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -930,7 +899,6 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 
0x10 MS_UNBINDABLE = 0x20000 @@ -945,7 +913,6 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -964,7 +931,6 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -985,10 +951,8 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1045,7 +1009,6 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1090,16 +1053,6 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x2401 - PERF_EVENT_IOC_ENABLE = 0x2400 - PERF_EVENT_IOC_ID = 0x80082407 - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 - PERF_EVENT_IOC_PERIOD = 0x40082404 - PERF_EVENT_IOC_REFRESH = 0x2402 - PERF_EVENT_IOC_RESET = 0x2403 - PERF_EVENT_IOC_SET_BPF = 0x40042408 - PERF_EVENT_IOC_SET_FILTER = 0x40082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1267,16 +1220,7 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x8 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd RLIMIT_NOFILE = 0x7 - RLIMIT_NPROC = 0x6 - RLIMIT_RSS = 0x5 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 RLIM_INFINITY = -0x1 RTAX_ADVMSS = 0x8 @@ -1303,7 +1247,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x19 RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1347,7 +1291,6 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1356,7 +1299,6 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1378,11 +1320,10 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x5f RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1397,8 +1338,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x14 + RTM_NR_MSGTYPES = 0x50 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1409,7 +1350,6 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1440,12 +1380,8 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1453,16 +1389,6 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR 
= 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1470,9 +1396,7 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1492,21 +1416,13 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 - SIOCINQ = 0x541b - SIOCOUTQ = 0x5411 - SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1525,15 +1441,11 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x800 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1579,7 +1491,6 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1588,13 +1499,11 @@ const ( SO_ERROR = 0x4 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x9 SO_LINGER = 0xd SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0xa @@ -1602,7 +1511,6 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 - SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1f SO_PRIORITY = 0xc @@ -1670,12 +1578,6 @@ const ( TAB2 = 0x1000 TAB3 = 0x1800 TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x8 TCFLSH = 0x540b TCGETA = 0x5405 TCGETS = 0x5401 @@ -1699,7 +1601,6 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1759,7 +1660,6 @@ const ( TIOCGPKT = 0x80045438 TIOCGPTLCK = 0x80045439 TIOCGPTN = 0x80045430 - TIOCGPTPEER = 0x5441 TIOCGRS485 = 0x542e TIOCGSERIAL = 0x541e TIOCGSID = 0x5429 @@ -1817,7 +1717,6 @@ const ( TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 TOSTOP = 0x100 - TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 TUNGETFEATURES = 0x800454cf @@ -1842,7 +1741,6 @@ const ( TUNSETVNETBE = 0x400454de TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc - UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1879,8 +1777,6 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x2 WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 XCASE = 0x4 XTABS = 0x1800 ) @@ -2052,6 +1948,7 @@ const ( SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) + SIGUNUSED = syscall.Signal(0x1f) SIGURG = syscall.Signal(0x17) SIGUSR1 = syscall.Signal(0xa) SIGUSR2 = syscall.Signal(0xc) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 3e8c2c7aa6a..fd0fb1c588c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -36,7 
+36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2c + AF_MAX = 0x2b AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,7 +51,6 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe - AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -130,7 +129,6 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x1008 @@ -327,9 +325,6 @@ const ( ECHOKE = 0x800 ECHONL = 0x40 ECHOPRT = 0x400 - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x800 - EFD_SEMAPHORE = 0x1 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -394,7 +389,6 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -456,8 +450,6 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -512,19 +504,6 @@ const ( F_ULOCK = 0x0 F_UNLCK = 0x2 F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 HUPCL = 0x400 @@ -662,10 +641,8 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -678,14 +655,12 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -696,10 +671,8 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -713,9 +686,7 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -758,7 +729,6 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -796,7 +766,6 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -929,7 +898,6 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -944,7 +912,6 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -963,7 +930,6 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 
NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -984,10 +950,8 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1044,7 +1008,6 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1089,16 +1052,6 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x2401 - PERF_EVENT_IOC_ENABLE = 0x2400 - PERF_EVENT_IOC_ID = 0x80042407 - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 - PERF_EVENT_IOC_PERIOD = 0x40082404 - PERF_EVENT_IOC_REFRESH = 0x2402 - PERF_EVENT_IOC_RESET = 0x2403 - PERF_EVENT_IOC_SET_BPF = 0x40042408 - PERF_EVENT_IOC_SET_FILTER = 0x40042406 - PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1271,16 +1224,7 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x8 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd RLIMIT_NOFILE = 0x7 - RLIMIT_NPROC = 0x6 - RLIMIT_RSS = 0x5 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 RLIM_INFINITY = -0x1 RTAX_ADVMSS = 0x8 @@ -1307,7 +1251,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x19 RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1351,7 +1295,6 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1360,7 +1303,6 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1382,11 +1324,10 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x5f RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1401,8 +1342,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x14 + RTM_NR_MSGTYPES = 0x50 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1413,7 +1354,6 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1444,12 +1384,8 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1457,16 +1393,6 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1474,9 +1400,7 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 
0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1496,21 +1420,13 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 - SIOCINQ = 0x541b - SIOCOUTQ = 0x5411 - SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1529,15 +1445,11 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x800 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1583,7 +1495,6 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1592,13 +1503,11 @@ const ( SO_ERROR = 0x4 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x9 SO_LINGER = 0xd SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0xa @@ -1606,7 +1515,6 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 - SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1f SO_PRIORITY = 0xc @@ -1674,12 +1582,6 @@ const ( TAB2 = 0x1000 TAB3 = 0x1800 TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x8 TCFLSH = 0x540b TCGETA = 0x5405 TCGETS = 0x5401 @@ -1703,7 +1605,6 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1763,7 +1664,6 @@ const ( TIOCGPKT = 0x80045438 TIOCGPTLCK = 0x80045439 TIOCGPTN = 0x80045430 - TIOCGPTPEER = 0x5441 TIOCGRS485 = 0x542e TIOCGSERIAL = 0x541e TIOCGSID = 0x5429 @@ -1821,7 +1721,6 @@ const ( TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 TOSTOP = 0x100 - TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x400854d5 TUNDETACHFILTER = 0x400854d6 TUNGETFEATURES = 0x800454cf @@ -1846,7 +1745,6 @@ const ( TUNSETVNETBE = 0x400454de TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc - UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1883,8 +1781,6 @@ const ( WORDSIZE = 0x20 WSTOPPED = 0x2 WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 XCASE = 0x4 XTABS = 0x1800 ) @@ -2056,6 +1952,7 @@ const ( SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) + SIGUNUSED = syscall.Signal(0x1f) SIGURG = syscall.Signal(0x17) SIGUSR1 = syscall.Signal(0xa) SIGUSR2 = syscall.Signal(0xc) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 383453349ff..50f7efb82df 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2c + AF_MAX = 0x2b AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,7 +51,6 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe - AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -130,7 +129,6 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 
= 0x301 ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x1008 @@ -327,9 +325,6 @@ const ( ECHOKE = 0x800 ECHONL = 0x40 ECHOPRT = 0x400 - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x800 - EFD_SEMAPHORE = 0x1 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -395,7 +390,6 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -444,7 +438,6 @@ const ( EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 - EXTRA_MAGIC = 0x45585401 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -458,8 +451,6 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -514,19 +505,6 @@ const ( F_ULOCK = 0x0 F_UNLCK = 0x2 F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 HUPCL = 0x400 @@ -664,10 +642,8 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -680,14 +656,12 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -698,10 +672,8 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -715,9 +687,7 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -760,7 +730,6 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -798,7 +767,6 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -931,7 +899,6 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -946,7 +913,6 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -965,7 +931,6 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -986,10 +951,8 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 
NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1046,7 +1009,6 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1091,16 +1053,6 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x2401 - PERF_EVENT_IOC_ENABLE = 0x2400 - PERF_EVENT_IOC_ID = 0x80082407 - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 - PERF_EVENT_IOC_PERIOD = 0x40082404 - PERF_EVENT_IOC_REFRESH = 0x2402 - PERF_EVENT_IOC_RESET = 0x2403 - PERF_EVENT_IOC_SET_BPF = 0x40042408 - PERF_EVENT_IOC_SET_FILTER = 0x40082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1257,16 +1209,7 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x8 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd RLIMIT_NOFILE = 0x7 - RLIMIT_NPROC = 0x6 - RLIMIT_RSS = 0x5 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 RLIM_INFINITY = -0x1 RTAX_ADVMSS = 0x8 @@ -1293,7 +1236,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x19 RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1337,7 +1280,6 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1346,7 +1288,6 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1368,11 +1309,10 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x5f RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1387,8 +1327,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x14 + RTM_NR_MSGTYPES = 0x50 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1399,7 +1339,6 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1430,12 +1369,8 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1443,16 +1378,6 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1460,9 +1385,7 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1482,21 +1405,13 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 - 
SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 - SIOCINQ = 0x541b - SIOCOUTQ = 0x5411 - SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1515,15 +1430,11 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x800 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1569,7 +1480,6 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1578,13 +1488,11 @@ const ( SO_ERROR = 0x4 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x9 SO_LINGER = 0xd SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0xa @@ -1592,7 +1500,6 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 - SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1f SO_PRIORITY = 0xc @@ -1660,12 +1567,6 @@ const ( TAB2 = 0x1000 TAB3 = 0x1800 TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x8 TCFLSH = 0x540b TCGETA = 0x5405 TCGETS = 0x5401 @@ -1689,7 +1590,6 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1749,7 +1649,6 @@ const ( TIOCGPKT = 0x80045438 TIOCGPTLCK = 0x80045439 TIOCGPTN = 0x80045430 - TIOCGPTPEER = 0x5441 TIOCGRS485 = 0x542e TIOCGSERIAL = 0x541e TIOCGSID = 0x5429 @@ -1807,7 +1706,6 @@ const ( TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 TOSTOP = 0x100 - TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 TUNGETFEATURES = 0x800454cf @@ -1832,7 +1730,6 @@ const ( TUNSETVNETBE = 0x400454de TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc - UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1869,8 +1766,6 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x2 WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 XCASE = 0x4 XTABS = 0x1800 ) @@ -2042,6 +1937,7 @@ const ( SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) + SIGUNUSED = syscall.Signal(0x1f) SIGURG = syscall.Signal(0x17) SIGUSR1 = syscall.Signal(0xa) SIGUSR2 = syscall.Signal(0xc) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index bde8f7d023c..984b7688e52 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2c + AF_MAX = 0x2b AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,7 +51,6 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe - AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -130,7 +129,6 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x1008 @@ -327,9 +325,6 @@ const ( ECHOKE = 0x800 ECHONL = 0x40 ECHOPRT = 0x400 - 
EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x80 - EFD_SEMAPHORE = 0x1 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -394,7 +389,6 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -456,8 +450,6 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -512,19 +504,6 @@ const ( F_ULOCK = 0x0 F_UNLCK = 0x2 F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 HUPCL = 0x400 @@ -662,10 +641,8 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -678,14 +655,12 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -696,10 +671,8 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -713,9 +686,7 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -758,7 +729,6 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -796,7 +766,6 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -930,7 +899,6 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -945,7 +913,6 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -964,7 +931,6 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -985,10 +951,8 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1045,7 +1009,6 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 
PACKET_FANOUT_QM = 0x5 @@ -1090,16 +1053,6 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x20002401 - PERF_EVENT_IOC_ENABLE = 0x20002400 - PERF_EVENT_IOC_ID = 0x40042407 - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 - PERF_EVENT_IOC_PERIOD = 0x80082404 - PERF_EVENT_IOC_REFRESH = 0x20002402 - PERF_EVENT_IOC_RESET = 0x20002403 - PERF_EVENT_IOC_SET_BPF = 0x80042408 - PERF_EVENT_IOC_SET_FILTER = 0x80042406 - PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1268,16 +1221,7 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x9 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd RLIMIT_NOFILE = 0x5 - RLIMIT_NPROC = 0x8 - RLIMIT_RSS = 0x7 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 RLIM_INFINITY = -0x1 RTAX_ADVMSS = 0x8 @@ -1304,7 +1248,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x19 RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1348,7 +1292,6 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1357,7 +1300,6 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1379,11 +1321,10 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x5f RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1398,8 +1339,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x14 + RTM_NR_MSGTYPES = 0x50 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1410,7 +1351,6 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1441,12 +1381,8 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1454,16 +1390,6 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x40047307 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1471,9 +1397,7 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1493,21 +1417,13 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x40047309 SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 - SIOCINQ = 0x467f - SIOCOUTQ = 0x7472 - SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 
SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1526,15 +1442,11 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x80047308 SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x1 - SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x80 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1580,7 +1492,6 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1589,13 +1500,11 @@ const ( SO_ERROR = 0x1007 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x8 SO_LINGER = 0x80 SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0x100 @@ -1603,7 +1512,6 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 - SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1e SO_PRIORITY = 0xc @@ -1672,12 +1580,6 @@ const ( TAB2 = 0x1000 TAB3 = 0x1800 TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x8 TCFLSH = 0x5407 TCGETA = 0x5401 TCGETS = 0x540d @@ -1700,7 +1602,6 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1759,7 +1660,6 @@ const ( TIOCGPKT = 0x40045438 TIOCGPTLCK = 0x40045439 TIOCGPTN = 0x40045430 - TIOCGPTPEER = 0x20005441 TIOCGRS485 = 0x4020542e TIOCGSERIAL = 0x5484 TIOCGSID = 0x7416 @@ -1820,7 +1720,6 @@ const ( TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 TOSTOP = 0x8000 - TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x800854d5 TUNDETACHFILTER = 0x800854d6 TUNGETFEATURES = 0x400454cf @@ -1845,7 +1744,6 @@ const ( TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc - UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0xd VEOF = 0x10 VEOL = 0x11 @@ -1883,8 +1781,6 @@ const ( WORDSIZE = 0x20 WSTOPPED = 0x2 WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 XCASE = 0x4 XTABS = 0x1800 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 42b6397d5d6..75f72da3ca7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2c + AF_MAX = 0x2b AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,7 +51,6 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe - AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -130,7 +129,6 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x1008 @@ -327,9 +325,6 @@ const ( ECHOKE = 0x800 ECHONL = 0x40 ECHOPRT = 0x400 - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x80 - EFD_SEMAPHORE = 0x1 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -394,7 +389,6 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -456,8 +450,6 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 
FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -512,19 +504,6 @@ const ( F_ULOCK = 0x0 F_UNLCK = 0x2 F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 HUPCL = 0x400 @@ -662,10 +641,8 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -678,14 +655,12 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -696,10 +671,8 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -713,9 +686,7 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -758,7 +729,6 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -796,7 +766,6 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -930,7 +899,6 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -945,7 +913,6 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -964,7 +931,6 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -985,10 +951,8 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1045,7 +1009,6 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1090,16 +1053,6 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x20002401 - PERF_EVENT_IOC_ENABLE = 0x20002400 - PERF_EVENT_IOC_ID = 0x40082407 - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 - PERF_EVENT_IOC_PERIOD = 0x80082404 - PERF_EVENT_IOC_REFRESH = 0x20002402 - PERF_EVENT_IOC_RESET = 0x20002403 - PERF_EVENT_IOC_SET_BPF = 0x80042408 - PERF_EVENT_IOC_SET_FILTER = 0x80082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1268,16 
+1221,7 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x9 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd RLIMIT_NOFILE = 0x5 - RLIMIT_NPROC = 0x8 - RLIMIT_RSS = 0x7 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 RLIM_INFINITY = -0x1 RTAX_ADVMSS = 0x8 @@ -1304,7 +1248,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x19 RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1348,7 +1292,6 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1357,7 +1300,6 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1379,11 +1321,10 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x5f RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1398,8 +1339,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x14 + RTM_NR_MSGTYPES = 0x50 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1410,7 +1351,6 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1441,12 +1381,8 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1454,16 +1390,6 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x40047307 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1471,9 +1397,7 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1493,21 +1417,13 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x40047309 SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 - SIOCINQ = 0x467f - SIOCOUTQ = 0x7472 - SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1526,15 +1442,11 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x80047308 SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x1 - SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x80 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1580,7 +1492,6 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF 
= 0x1b SO_DETACH_FILTER = 0x1b @@ -1589,13 +1500,11 @@ const ( SO_ERROR = 0x1007 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x8 SO_LINGER = 0x80 SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0x100 @@ -1603,7 +1512,6 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 - SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1e SO_PRIORITY = 0xc @@ -1672,12 +1580,6 @@ const ( TAB2 = 0x1000 TAB3 = 0x1800 TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x8 TCFLSH = 0x5407 TCGETA = 0x5401 TCGETS = 0x540d @@ -1700,7 +1602,6 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1759,7 +1660,6 @@ const ( TIOCGPKT = 0x40045438 TIOCGPTLCK = 0x40045439 TIOCGPTN = 0x40045430 - TIOCGPTPEER = 0x20005441 TIOCGRS485 = 0x4020542e TIOCGSERIAL = 0x5484 TIOCGSID = 0x7416 @@ -1820,7 +1720,6 @@ const ( TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 TOSTOP = 0x8000 - TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 TUNGETFEATURES = 0x400454cf @@ -1845,7 +1744,6 @@ const ( TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc - UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0xd VEOF = 0x10 VEOL = 0x11 @@ -1883,8 +1781,6 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x2 WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 XCASE = 0x4 XTABS = 0x1800 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index bd4ff814749..06d704e83b8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2c + AF_MAX = 0x2b AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,7 +51,6 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe - AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -130,7 +129,6 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x1008 @@ -327,9 +325,6 @@ const ( ECHOKE = 0x800 ECHONL = 0x40 ECHOPRT = 0x400 - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x80 - EFD_SEMAPHORE = 0x1 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -394,7 +389,6 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -456,8 +450,6 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -512,19 +504,6 @@ const ( F_ULOCK = 0x0 F_UNLCK = 0x2 F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 HUPCL = 0x400 @@ -662,10 +641,8 @@ const ( IPV6_2292PKTOPTIONS = 0x6 
IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -678,14 +655,12 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -696,10 +671,8 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -713,9 +686,7 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -758,7 +729,6 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -796,7 +766,6 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -930,7 +899,6 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -945,7 +913,6 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -964,7 +931,6 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -985,10 +951,8 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1045,7 +1009,6 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1090,16 +1053,6 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x20002401 - PERF_EVENT_IOC_ENABLE = 0x20002400 - PERF_EVENT_IOC_ID = 0x40082407 - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 - PERF_EVENT_IOC_PERIOD = 0x80082404 - PERF_EVENT_IOC_REFRESH = 0x20002402 - PERF_EVENT_IOC_RESET = 0x20002403 - PERF_EVENT_IOC_SET_BPF = 0x80042408 - PERF_EVENT_IOC_SET_FILTER = 0x80082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1268,16 +1221,7 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x9 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd RLIMIT_NOFILE = 0x5 - RLIMIT_NPROC = 0x8 - RLIMIT_RSS = 0x7 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 RLIM_INFINITY = -0x1 RTAX_ADVMSS = 0x8 @@ -1304,7 +1248,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x19 RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 
0x2000000 @@ -1348,7 +1292,6 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1357,7 +1300,6 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1379,11 +1321,10 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x5f RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1398,8 +1339,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x14 + RTM_NR_MSGTYPES = 0x50 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1410,7 +1351,6 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1441,12 +1381,8 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1454,16 +1390,6 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x40047307 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1471,9 +1397,7 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1493,21 +1417,13 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x40047309 SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 - SIOCINQ = 0x467f - SIOCOUTQ = 0x7472 - SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1526,15 +1442,11 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x80047308 SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x1 - SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x80 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1580,7 +1492,6 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1589,13 +1500,11 @@ const ( SO_ERROR = 0x1007 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x8 SO_LINGER = 0x80 SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0x100 @@ -1603,7 +1512,6 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 - SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1e SO_PRIORITY = 0xc @@ -1672,12 +1580,6 @@ const ( TAB2 = 
0x1000 TAB3 = 0x1800 TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x8 TCFLSH = 0x5407 TCGETA = 0x5401 TCGETS = 0x540d @@ -1700,7 +1602,6 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1759,7 +1660,6 @@ const ( TIOCGPKT = 0x40045438 TIOCGPTLCK = 0x40045439 TIOCGPTN = 0x40045430 - TIOCGPTPEER = 0x20005441 TIOCGRS485 = 0x4020542e TIOCGSERIAL = 0x5484 TIOCGSID = 0x7416 @@ -1820,7 +1720,6 @@ const ( TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 TOSTOP = 0x8000 - TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 TUNGETFEATURES = 0x400454cf @@ -1845,7 +1744,6 @@ const ( TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc - UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0xd VEOF = 0x10 VEOL = 0x11 @@ -1883,8 +1781,6 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x2 WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 XCASE = 0x4 XTABS = 0x1800 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 6dfc95c40f7..94e1ac2da91 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2c + AF_MAX = 0x2b AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,7 +51,6 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe - AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -130,7 +129,6 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x1008 @@ -327,9 +325,6 @@ const ( ECHOKE = 0x800 ECHONL = 0x40 ECHOPRT = 0x400 - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x80 - EFD_SEMAPHORE = 0x1 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -394,7 +389,6 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -456,8 +450,6 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -512,19 +504,6 @@ const ( F_ULOCK = 0x0 F_UNLCK = 0x2 F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 HUPCL = 0x400 @@ -662,10 +641,8 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -678,14 +655,12 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 
0x32 IPV6_PMTUDISC_DO = 0x2 @@ -696,10 +671,8 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -713,9 +686,7 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -758,7 +729,6 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -796,7 +766,6 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -930,7 +899,6 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -945,7 +913,6 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -964,7 +931,6 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -985,10 +951,8 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1045,7 +1009,6 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1090,16 +1053,6 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x20002401 - PERF_EVENT_IOC_ENABLE = 0x20002400 - PERF_EVENT_IOC_ID = 0x40042407 - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 - PERF_EVENT_IOC_PERIOD = 0x80082404 - PERF_EVENT_IOC_REFRESH = 0x20002402 - PERF_EVENT_IOC_RESET = 0x20002403 - PERF_EVENT_IOC_SET_BPF = 0x80042408 - PERF_EVENT_IOC_SET_FILTER = 0x80042406 - PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1268,16 +1221,7 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x9 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd RLIMIT_NOFILE = 0x5 - RLIMIT_NPROC = 0x8 - RLIMIT_RSS = 0x7 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 RLIM_INFINITY = -0x1 RTAX_ADVMSS = 0x8 @@ -1304,7 +1248,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x19 RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1348,7 +1292,6 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1357,7 +1300,6 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1379,11 +1321,10 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x5f RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 
RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1398,8 +1339,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x14 + RTM_NR_MSGTYPES = 0x50 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1410,7 +1351,6 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1441,12 +1381,8 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1454,16 +1390,6 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x40047307 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1471,9 +1397,7 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1493,21 +1417,13 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x40047309 SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 - SIOCINQ = 0x467f - SIOCOUTQ = 0x7472 - SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1526,15 +1442,11 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x80047308 SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x1 - SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x80 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1580,7 +1492,6 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1589,13 +1500,11 @@ const ( SO_ERROR = 0x1007 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x8 SO_LINGER = 0x80 SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0x100 @@ -1603,7 +1512,6 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 - SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1e SO_PRIORITY = 0xc @@ -1672,12 +1580,6 @@ const ( TAB2 = 0x1000 TAB3 = 0x1800 TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x8 TCFLSH = 0x5407 TCGETA = 0x5401 TCGETS = 0x540d @@ -1700,7 +1602,6 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1759,7 +1660,6 @@ const ( TIOCGPKT = 0x40045438 TIOCGPTLCK = 0x40045439 TIOCGPTN = 0x40045430 - TIOCGPTPEER = 
0x20005441 TIOCGRS485 = 0x4020542e TIOCGSERIAL = 0x5484 TIOCGSID = 0x7416 @@ -1820,7 +1720,6 @@ const ( TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 TOSTOP = 0x8000 - TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x800854d5 TUNDETACHFILTER = 0x800854d6 TUNGETFEATURES = 0x400454cf @@ -1845,7 +1744,6 @@ const ( TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc - UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0xd VEOF = 0x10 VEOL = 0x11 @@ -1883,8 +1781,6 @@ const ( WORDSIZE = 0x20 WSTOPPED = 0x2 WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 XCASE = 0x4 XTABS = 0x1800 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 46b09d320de..0645c5c1967 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2c + AF_MAX = 0x2b AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,7 +51,6 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe - AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -130,7 +129,6 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x17 @@ -327,9 +325,6 @@ const ( ECHOKE = 0x1 ECHONL = 0x10 ECHOPRT = 0x20 - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x800 - EFD_SEMAPHORE = 0x1 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -394,7 +389,6 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -456,8 +450,6 @@ const ( FF1 = 0x4000 FFDLY = 0x4000 FLUSHO = 0x800000 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -512,19 +504,6 @@ const ( F_ULOCK = 0x0 F_UNLCK = 0x2 F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 HUPCL = 0x4000 @@ -662,10 +641,8 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -678,14 +655,12 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -696,10 +671,8 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -713,9 +686,7 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -758,7 
+729,6 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -796,7 +766,6 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -929,7 +898,6 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -944,7 +912,6 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -963,7 +930,6 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -986,10 +952,8 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1046,7 +1010,6 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1091,16 +1054,6 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 - PERF_EVENT_IOC_DISABLE = 0x20002401 - PERF_EVENT_IOC_ENABLE = 0x20002400 - PERF_EVENT_IOC_ID = 0x40082407 - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 - PERF_EVENT_IOC_PERIOD = 0x80082404 - PERF_EVENT_IOC_REFRESH = 0x20002402 - PERF_EVENT_IOC_RESET = 0x20002403 - PERF_EVENT_IOC_SET_BPF = 0x80042408 - PERF_EVENT_IOC_SET_FILTER = 0x80082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1324,16 +1277,7 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x8 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd RLIMIT_NOFILE = 0x7 - RLIMIT_NPROC = 0x6 - RLIMIT_RSS = 0x5 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 RLIM_INFINITY = -0x1 RTAX_ADVMSS = 0x8 @@ -1360,7 +1304,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x19 RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1404,7 +1348,6 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1413,7 +1356,6 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1435,11 +1377,10 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x5f RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1454,8 +1395,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x14 + RTM_NR_MSGTYPES = 0x50 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1466,7 +1407,6 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ 
-1497,12 +1437,8 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1510,16 +1446,6 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1527,9 +1453,7 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1549,21 +1473,13 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 - SIOCINQ = 0x4004667f - SIOCOUTQ = 0x40047473 - SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1582,15 +1498,11 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x800 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1636,7 +1548,6 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1645,13 +1556,11 @@ const ( SO_ERROR = 0x4 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x9 SO_LINGER = 0xd SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0xa @@ -1659,7 +1568,6 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 - SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1f SO_PRIORITY = 0xc @@ -1727,12 +1635,6 @@ const ( TAB2 = 0x800 TAB3 = 0xc00 TABDLY = 0xc00 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x8 TCFLSH = 0x2000741f TCGETA = 0x40147417 TCGETS = 0x402c7413 @@ -1754,7 +1656,6 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1811,7 +1712,6 @@ const ( TIOCGPKT = 0x40045438 TIOCGPTLCK = 0x40045439 TIOCGPTN = 0x40045430 - TIOCGPTPEER = 0x20005441 TIOCGRS485 = 0x542e TIOCGSERIAL = 0x541e TIOCGSID = 0x5429 @@ -1878,7 +1778,6 @@ const ( TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 TOSTOP = 0x400000 - TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 TUNGETFEATURES = 0x400454cf @@ -1903,7 +1802,6 @@ const ( TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc - UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0x10 VEOF = 0x4 VEOL = 0x6 @@ -1940,8 +1838,6 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x2 WUNTRACED = 0x2 - XATTR_CREATE = 0x1 
- XATTR_REPLACE = 0x2 XCASE = 0x4000 XTABS = 0xc00 ) @@ -2113,6 +2009,7 @@ const ( SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) + SIGUNUSED = syscall.Signal(0x1f) SIGURG = syscall.Signal(0x17) SIGUSR1 = syscall.Signal(0xa) SIGUSR2 = syscall.Signal(0xc) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 08adb1d8fc3..e946a24942a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2c + AF_MAX = 0x2b AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,7 +51,6 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe - AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -130,7 +129,6 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x17 @@ -327,9 +325,6 @@ const ( ECHOKE = 0x1 ECHONL = 0x10 ECHOPRT = 0x20 - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x800 - EFD_SEMAPHORE = 0x1 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -394,7 +389,6 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -456,8 +450,6 @@ const ( FF1 = 0x4000 FFDLY = 0x4000 FLUSHO = 0x800000 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -512,19 +504,6 @@ const ( F_ULOCK = 0x0 F_UNLCK = 0x2 F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 HUPCL = 0x4000 @@ -662,10 +641,8 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -678,14 +655,12 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -696,10 +671,8 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -713,9 +686,7 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -758,7 +729,6 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -796,7 +766,6 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 
0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -929,7 +898,6 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -944,7 +912,6 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -963,7 +930,6 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -986,10 +952,8 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1046,7 +1010,6 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1091,16 +1054,6 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 - PERF_EVENT_IOC_DISABLE = 0x20002401 - PERF_EVENT_IOC_ENABLE = 0x20002400 - PERF_EVENT_IOC_ID = 0x40082407 - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 - PERF_EVENT_IOC_PERIOD = 0x80082404 - PERF_EVENT_IOC_REFRESH = 0x20002402 - PERF_EVENT_IOC_RESET = 0x20002403 - PERF_EVENT_IOC_SET_BPF = 0x80042408 - PERF_EVENT_IOC_SET_FILTER = 0x80082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1324,16 +1277,7 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x8 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd RLIMIT_NOFILE = 0x7 - RLIMIT_NPROC = 0x6 - RLIMIT_RSS = 0x5 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 RLIM_INFINITY = -0x1 RTAX_ADVMSS = 0x8 @@ -1360,7 +1304,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x19 RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1404,7 +1348,6 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1413,7 +1356,6 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1435,11 +1377,10 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x5f RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1454,8 +1395,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x14 + RTM_NR_MSGTYPES = 0x50 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1466,7 +1407,6 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1497,12 +1437,8 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 
SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1510,16 +1446,6 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1527,9 +1453,7 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1549,21 +1473,13 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 - SIOCINQ = 0x4004667f - SIOCOUTQ = 0x40047473 - SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1582,15 +1498,11 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x800 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1636,7 +1548,6 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1645,13 +1556,11 @@ const ( SO_ERROR = 0x4 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x9 SO_LINGER = 0xd SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0xa @@ -1659,7 +1568,6 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 - SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1f SO_PRIORITY = 0xc @@ -1727,12 +1635,6 @@ const ( TAB2 = 0x800 TAB3 = 0xc00 TABDLY = 0xc00 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x8 TCFLSH = 0x2000741f TCGETA = 0x40147417 TCGETS = 0x402c7413 @@ -1754,7 +1656,6 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1811,7 +1712,6 @@ const ( TIOCGPKT = 0x40045438 TIOCGPTLCK = 0x40045439 TIOCGPTN = 0x40045430 - TIOCGPTPEER = 0x20005441 TIOCGRS485 = 0x542e TIOCGSERIAL = 0x541e TIOCGSID = 0x5429 @@ -1878,7 +1778,6 @@ const ( TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 TOSTOP = 0x400000 - TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 TUNGETFEATURES = 0x400454cf @@ -1903,7 +1802,6 @@ const ( TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc - UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0x10 VEOF = 0x4 VEOL = 0x6 @@ -1940,8 +1838,6 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x2 WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 XCASE = 0x4000 XTABS = 0xc00 ) @@ -2113,6 +2009,7 @@ const ( SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) + SIGUNUSED = syscall.Signal(0x1f) SIGURG = syscall.Signal(0x17) SIGUSR1 = syscall.Signal(0xa) SIGUSR2 = 
syscall.Signal(0xc) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 70bc1a2fc5e..4d14fd61aaf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2c + AF_MAX = 0x2b AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,7 +51,6 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe - AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -130,7 +129,6 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x1008 @@ -327,9 +325,6 @@ const ( ECHOKE = 0x800 ECHONL = 0x40 ECHOPRT = 0x400 - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x800 - EFD_SEMAPHORE = 0x1 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -394,7 +389,6 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -456,8 +450,6 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -512,19 +504,6 @@ const ( F_ULOCK = 0x0 F_UNLCK = 0x2 F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 HUPCL = 0x400 @@ -662,10 +641,8 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -678,14 +655,12 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -696,10 +671,8 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -713,9 +686,7 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -758,7 +729,6 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -796,7 +766,6 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -929,7 +898,6 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -944,7 +912,6 @@ const ( 
NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -963,7 +930,6 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -984,10 +950,8 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1044,7 +1008,6 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1089,16 +1052,6 @@ const ( PARMRK = 0x8 PARODD = 0x200 PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x2401 - PERF_EVENT_IOC_ENABLE = 0x2400 - PERF_EVENT_IOC_ID = 0x80082407 - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 - PERF_EVENT_IOC_PERIOD = 0x40082404 - PERF_EVENT_IOC_REFRESH = 0x2402 - PERF_EVENT_IOC_RESET = 0x2403 - PERF_EVENT_IOC_SET_BPF = 0x40042408 - PERF_EVENT_IOC_SET_FILTER = 0x40082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x2405 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1328,16 +1281,7 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x8 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd RLIMIT_NOFILE = 0x7 - RLIMIT_NPROC = 0x6 - RLIMIT_RSS = 0x5 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 RLIM_INFINITY = -0x1 RTAX_ADVMSS = 0x8 @@ -1364,7 +1308,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1a + RTA_MAX = 0x19 RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1408,7 +1352,6 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1417,7 +1360,6 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1439,11 +1381,10 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x63 + RTM_MAX = 0x5f RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1458,8 +1399,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x15 - RTM_NR_MSGTYPES = 0x54 + RTM_NR_FAMILIES = 0x14 + RTM_NR_MSGTYPES = 0x50 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1470,7 +1411,6 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1501,12 +1441,8 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1514,16 +1450,6 @@ const ( SIOCADDMULTI = 0x8931 SIOCADDRT = 0x890b SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR 
= 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 SIOCDARP = 0x8953 SIOCDELDLCI = 0x8981 SIOCDELMULTI = 0x8932 @@ -1531,9 +1457,7 @@ const ( SIOCDEVPRIVATE = 0x89f0 SIOCDIFADDR = 0x8936 SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 - SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 SIOCGIFBRDADDR = 0x8919 @@ -1553,21 +1477,13 @@ const ( SIOCGIFPFLAGS = 0x8935 SIOCGIFSLAVE = 0x8929 SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 SIOCGPGRP = 0x8904 SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c SIOCGSTAMP = 0x8906 SIOCGSTAMPNS = 0x8907 - SIOCINQ = 0x541b - SIOCOUTQ = 0x5411 - SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 SIOCSIFADDR = 0x8916 SIOCSIFBR = 0x8941 SIOCSIFBRDADDR = 0x891a @@ -1586,15 +1502,11 @@ const ( SIOCSIFPFLAGS = 0x8934 SIOCSIFSLAVE = 0x8930 SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 SOCK_NONBLOCK = 0x800 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -1640,7 +1552,6 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1649,13 +1560,11 @@ const ( SO_ERROR = 0x4 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x9 SO_LINGER = 0xd SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0xa @@ -1663,7 +1572,6 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 - SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1f SO_PRIORITY = 0xc @@ -1731,12 +1639,6 @@ const ( TAB2 = 0x1000 TAB3 = 0x1800 TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x8 TCFLSH = 0x540b TCGETA = 0x5405 TCGETS = 0x5401 @@ -1760,7 +1662,6 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1820,7 +1721,6 @@ const ( TIOCGPKT = 0x80045438 TIOCGPTLCK = 0x80045439 TIOCGPTN = 0x80045430 - TIOCGPTPEER = 0x5441 TIOCGRS485 = 0x542e TIOCGSERIAL = 0x541e TIOCGSID = 0x5429 @@ -1878,7 +1778,6 @@ const ( TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 TOSTOP = 0x100 - TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 TUNGETFEATURES = 0x800454cf @@ -1903,7 +1802,6 @@ const ( TUNSETVNETBE = 0x400454de TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc - UMOUNT_NOFOLLOW = 0x8 VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1940,8 +1838,6 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x2 WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 XCASE = 0x4 XTABS = 0x1800 ) @@ -2113,6 +2009,7 @@ const ( SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) + SIGUNUSED = syscall.Signal(0x1f) SIGURG = syscall.Signal(0x17) SIGUSR1 = syscall.Signal(0xa) SIGUSR2 = syscall.Signal(0xc) diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index 206c75f094e..ac85ca64529 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -1006,9 +1006,6 @@ const ( MSG_TRUNC = 0x10 MSG_USERFLAGS = 
0xffffff MSG_WAITALL = 0x40 - MS_ASYNC = 0x1 - MS_INVALIDATE = 0x2 - MS_SYNC = 0x4 NAME_MAX = 0x1ff NET_RT_DUMP = 0x1 NET_RT_FLAGS = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go deleted file mode 100644 index 3ed0b2602f9..00000000000 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ /dev/null @@ -1,1586 +0,0 @@ -// mkerrors.sh -// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT - -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs -- _const.go - -// +build arm,openbsd - -package unix - -import "syscall" - -const ( - AF_APPLETALK = 0x10 - AF_BLUETOOTH = 0x20 - AF_CCITT = 0xa - AF_CHAOS = 0x5 - AF_CNT = 0x15 - AF_COIP = 0x14 - AF_DATAKIT = 0x9 - AF_DECnet = 0xc - AF_DLI = 0xd - AF_E164 = 0x1a - AF_ECMA = 0x8 - AF_ENCAP = 0x1c - AF_HYLINK = 0xf - AF_IMPLINK = 0x3 - AF_INET = 0x2 - AF_INET6 = 0x18 - AF_IPX = 0x17 - AF_ISDN = 0x1a - AF_ISO = 0x7 - AF_KEY = 0x1e - AF_LAT = 0xe - AF_LINK = 0x12 - AF_LOCAL = 0x1 - AF_MAX = 0x24 - AF_MPLS = 0x21 - AF_NATM = 0x1b - AF_NS = 0x6 - AF_OSI = 0x7 - AF_PUP = 0x4 - AF_ROUTE = 0x11 - AF_SIP = 0x1d - AF_SNA = 0xb - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - ARPHRD_ETHER = 0x1 - ARPHRD_FRELAY = 0xf - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - B0 = 0x0 - B110 = 0x6e - B115200 = 0x1c200 - B1200 = 0x4b0 - B134 = 0x86 - B14400 = 0x3840 - B150 = 0x96 - B1800 = 0x708 - B19200 = 0x4b00 - B200 = 0xc8 - B230400 = 0x38400 - B2400 = 0x960 - B28800 = 0x7080 - B300 = 0x12c - B38400 = 0x9600 - B4800 = 0x12c0 - B50 = 0x32 - B57600 = 0xe100 - B600 = 0x258 - B7200 = 0x1c20 - B75 = 0x4b - B76800 = 0x12c00 - B9600 = 0x2580 - BIOCFLUSH = 0x20004268 - BIOCGBLEN = 0x40044266 - BIOCGDIRFILT = 0x4004427c - BIOCGDLT = 0x4004426a - BIOCGDLTLIST = 0xc008427b - BIOCGETIF = 0x4020426b - BIOCGFILDROP = 0x40044278 - BIOCGHDRCMPLT = 0x40044274 - BIOCGRSIG = 0x40044273 - BIOCGRTIMEOUT = 0x400c426e - BIOCGSTATS = 0x4008426f - BIOCIMMEDIATE = 0x80044270 - BIOCLOCK = 0x20004276 - BIOCPROMISC = 0x20004269 - BIOCSBLEN = 0xc0044266 - BIOCSDIRFILT = 0x8004427d - BIOCSDLT = 0x8004427a - BIOCSETF = 0x80084267 - BIOCSETIF = 0x8020426c - BIOCSETWF = 0x80084277 - BIOCSFILDROP = 0x80044279 - BIOCSHDRCMPLT = 0x80044275 - BIOCSRSIG = 0x80044272 - BIOCSRTIMEOUT = 0x800c426d - BIOCVERSION = 0x40044271 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ALIGNMENT = 0x4 - BPF_ALU = 0x4 - BPF_AND = 0x50 - BPF_B = 0x10 - BPF_DIRECTION_IN = 0x1 - BPF_DIRECTION_OUT = 0x2 - BPF_DIV = 0x30 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JMP = 0x5 - BPF_JSET = 0x40 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXBUFSIZE = 0x200000 - BPF_MAXINSNS = 0x200 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINBUFSIZE = 0x20 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_OR = 0x40 - BPF_RELEASE = 0x30bb6 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAX = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BRKINT = 0x2 - CFLUSH = 0xf - CLOCAL = 0x8000 - CREAD = 0x800 - CS5 = 0x0 - CS6 = 0x100 - CS7 = 0x200 - CS8 = 0x300 - CSIZE = 0x300 - CSTART = 0x11 - CSTATUS = 0xff - CSTOP = 0x13 - CSTOPB = 0x400 - CSUSP = 0x1a - CTL_MAXNAME = 0xc - CTL_NET = 0x4 - DIOCOSFPFLUSH = 0x2000444e - DLT_ARCNET = 0x7 - DLT_ATM_RFC1483 = 0xb - DLT_AX25 = 0x3 - DLT_CHAOS = 0x5 - DLT_C_HDLC = 0x68 - DLT_EN10MB = 0x1 - DLT_EN3MB = 
0x2 - DLT_ENC = 0xd - DLT_FDDI = 0xa - DLT_IEEE802 = 0x6 - DLT_IEEE802_11 = 0x69 - DLT_IEEE802_11_RADIO = 0x7f - DLT_LOOP = 0xc - DLT_MPLS = 0xdb - DLT_NULL = 0x0 - DLT_PFLOG = 0x75 - DLT_PFSYNC = 0x12 - DLT_PPP = 0x9 - DLT_PPP_BSDOS = 0x10 - DLT_PPP_ETHER = 0x33 - DLT_PPP_SERIAL = 0x32 - DLT_PRONET = 0x4 - DLT_RAW = 0xe - DLT_SLIP = 0x8 - DLT_SLIP_BSDOS = 0xf - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - ECHO = 0x8 - ECHOCTL = 0x40 - ECHOE = 0x2 - ECHOK = 0x4 - ECHOKE = 0x1 - ECHONL = 0x10 - ECHOPRT = 0x20 - EMT_TAGOVF = 0x1 - EMUL_ENABLED = 0x1 - EMUL_NATIVE = 0x2 - ENDRUNDISC = 0x9 - ETHERMIN = 0x2e - ETHERMTU = 0x5dc - ETHERTYPE_8023 = 0x4 - ETHERTYPE_AARP = 0x80f3 - ETHERTYPE_ACCTON = 0x8390 - ETHERTYPE_AEONIC = 0x8036 - ETHERTYPE_ALPHA = 0x814a - ETHERTYPE_AMBER = 0x6008 - ETHERTYPE_AMOEBA = 0x8145 - ETHERTYPE_AOE = 0x88a2 - ETHERTYPE_APOLLO = 0x80f7 - ETHERTYPE_APOLLODOMAIN = 0x8019 - ETHERTYPE_APPLETALK = 0x809b - ETHERTYPE_APPLITEK = 0x80c7 - ETHERTYPE_ARGONAUT = 0x803a - ETHERTYPE_ARP = 0x806 - ETHERTYPE_AT = 0x809b - ETHERTYPE_ATALK = 0x809b - ETHERTYPE_ATOMIC = 0x86df - ETHERTYPE_ATT = 0x8069 - ETHERTYPE_ATTSTANFORD = 0x8008 - ETHERTYPE_AUTOPHON = 0x806a - ETHERTYPE_AXIS = 0x8856 - ETHERTYPE_BCLOOP = 0x9003 - ETHERTYPE_BOFL = 0x8102 - ETHERTYPE_CABLETRON = 0x7034 - ETHERTYPE_CHAOS = 0x804 - ETHERTYPE_COMDESIGN = 0x806c - ETHERTYPE_COMPUGRAPHIC = 0x806d - ETHERTYPE_COUNTERPOINT = 0x8062 - ETHERTYPE_CRONUS = 0x8004 - ETHERTYPE_CRONUSVLN = 0x8003 - ETHERTYPE_DCA = 0x1234 - ETHERTYPE_DDE = 0x807b - ETHERTYPE_DEBNI = 0xaaaa - ETHERTYPE_DECAM = 0x8048 - ETHERTYPE_DECCUST = 0x6006 - ETHERTYPE_DECDIAG = 0x6005 - ETHERTYPE_DECDNS = 0x803c - ETHERTYPE_DECDTS = 0x803e - ETHERTYPE_DECEXPER = 0x6000 - ETHERTYPE_DECLAST = 0x8041 - ETHERTYPE_DECLTM = 0x803f - ETHERTYPE_DECMUMPS = 0x6009 - ETHERTYPE_DECNETBIOS = 0x8040 - ETHERTYPE_DELTACON = 0x86de - ETHERTYPE_DIDDLE = 0x4321 - ETHERTYPE_DLOG1 = 0x660 - ETHERTYPE_DLOG2 = 0x661 - ETHERTYPE_DN = 0x6003 - ETHERTYPE_DOGFIGHT = 0x1989 - ETHERTYPE_DSMD = 0x8039 - ETHERTYPE_ECMA = 0x803 - ETHERTYPE_ENCRYPT = 0x803d - ETHERTYPE_ES = 0x805d - ETHERTYPE_EXCELAN = 0x8010 - ETHERTYPE_EXPERDATA = 0x8049 - ETHERTYPE_FLIP = 0x8146 - ETHERTYPE_FLOWCONTROL = 0x8808 - ETHERTYPE_FRARP = 0x808 - ETHERTYPE_GENDYN = 0x8068 - ETHERTYPE_HAYES = 0x8130 - ETHERTYPE_HIPPI_FP = 0x8180 - ETHERTYPE_HITACHI = 0x8820 - ETHERTYPE_HP = 0x8005 - ETHERTYPE_IEEEPUP = 0xa00 - ETHERTYPE_IEEEPUPAT = 0xa01 - ETHERTYPE_IMLBL = 0x4c42 - ETHERTYPE_IMLBLDIAG = 0x424c - ETHERTYPE_IP = 0x800 - ETHERTYPE_IPAS = 0x876c - ETHERTYPE_IPV6 = 0x86dd - ETHERTYPE_IPX = 0x8137 - ETHERTYPE_IPXNEW = 0x8037 - ETHERTYPE_KALPANA = 0x8582 - ETHERTYPE_LANBRIDGE = 0x8038 - ETHERTYPE_LANPROBE = 0x8888 - ETHERTYPE_LAT = 0x6004 - ETHERTYPE_LBACK = 0x9000 - ETHERTYPE_LITTLE = 0x8060 - ETHERTYPE_LLDP = 0x88cc - ETHERTYPE_LOGICRAFT = 0x8148 - ETHERTYPE_LOOPBACK = 0x9000 - ETHERTYPE_MATRA = 0x807a - ETHERTYPE_MAX = 0xffff - ETHERTYPE_MERIT = 0x807c - ETHERTYPE_MICP = 0x873a - ETHERTYPE_MOPDL = 0x6001 - ETHERTYPE_MOPRC = 0x6002 - ETHERTYPE_MOTOROLA = 0x818d - ETHERTYPE_MPLS = 0x8847 - ETHERTYPE_MPLS_MCAST = 0x8848 - ETHERTYPE_MUMPS = 0x813f - ETHERTYPE_NBPCC = 0x3c04 - ETHERTYPE_NBPCLAIM = 0x3c09 - ETHERTYPE_NBPCLREQ = 0x3c05 - ETHERTYPE_NBPCLRSP = 0x3c06 - ETHERTYPE_NBPCREQ = 0x3c02 - ETHERTYPE_NBPCRSP = 0x3c03 - ETHERTYPE_NBPDG = 0x3c07 - ETHERTYPE_NBPDGB = 0x3c08 - ETHERTYPE_NBPDLTE = 0x3c0a - ETHERTYPE_NBPRAR = 0x3c0c - 
ETHERTYPE_NBPRAS = 0x3c0b - ETHERTYPE_NBPRST = 0x3c0d - ETHERTYPE_NBPSCD = 0x3c01 - ETHERTYPE_NBPVCD = 0x3c00 - ETHERTYPE_NBS = 0x802 - ETHERTYPE_NCD = 0x8149 - ETHERTYPE_NESTAR = 0x8006 - ETHERTYPE_NETBEUI = 0x8191 - ETHERTYPE_NOVELL = 0x8138 - ETHERTYPE_NS = 0x600 - ETHERTYPE_NSAT = 0x601 - ETHERTYPE_NSCOMPAT = 0x807 - ETHERTYPE_NTRAILER = 0x10 - ETHERTYPE_OS9 = 0x7007 - ETHERTYPE_OS9NET = 0x7009 - ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e - ETHERTYPE_PCS = 0x4242 - ETHERTYPE_PLANNING = 0x8044 - ETHERTYPE_PPP = 0x880b - ETHERTYPE_PPPOE = 0x8864 - ETHERTYPE_PPPOEDISC = 0x8863 - ETHERTYPE_PRIMENTS = 0x7031 - ETHERTYPE_PUP = 0x200 - ETHERTYPE_PUPAT = 0x200 - ETHERTYPE_QINQ = 0x88a8 - ETHERTYPE_RACAL = 0x7030 - ETHERTYPE_RATIONAL = 0x8150 - ETHERTYPE_RAWFR = 0x6559 - ETHERTYPE_RCL = 0x1995 - ETHERTYPE_RDP = 0x8739 - ETHERTYPE_RETIX = 0x80f2 - ETHERTYPE_REVARP = 0x8035 - ETHERTYPE_SCA = 0x6007 - ETHERTYPE_SECTRA = 0x86db - ETHERTYPE_SECUREDATA = 0x876d - ETHERTYPE_SGITW = 0x817e - ETHERTYPE_SG_BOUNCE = 0x8016 - ETHERTYPE_SG_DIAG = 0x8013 - ETHERTYPE_SG_NETGAMES = 0x8014 - ETHERTYPE_SG_RESV = 0x8015 - ETHERTYPE_SIMNET = 0x5208 - ETHERTYPE_SLOW = 0x8809 - ETHERTYPE_SNA = 0x80d5 - ETHERTYPE_SNMP = 0x814c - ETHERTYPE_SONIX = 0xfaf5 - ETHERTYPE_SPIDER = 0x809f - ETHERTYPE_SPRITE = 0x500 - ETHERTYPE_STP = 0x8181 - ETHERTYPE_TALARIS = 0x812b - ETHERTYPE_TALARISMC = 0x852b - ETHERTYPE_TCPCOMP = 0x876b - ETHERTYPE_TCPSM = 0x9002 - ETHERTYPE_TEC = 0x814f - ETHERTYPE_TIGAN = 0x802f - ETHERTYPE_TRAIL = 0x1000 - ETHERTYPE_TRANSETHER = 0x6558 - ETHERTYPE_TYMSHARE = 0x802e - ETHERTYPE_UBBST = 0x7005 - ETHERTYPE_UBDEBUG = 0x900 - ETHERTYPE_UBDIAGLOOP = 0x7002 - ETHERTYPE_UBDL = 0x7000 - ETHERTYPE_UBNIU = 0x7001 - ETHERTYPE_UBNMC = 0x7003 - ETHERTYPE_VALID = 0x1600 - ETHERTYPE_VARIAN = 0x80dd - ETHERTYPE_VAXELN = 0x803b - ETHERTYPE_VEECO = 0x8067 - ETHERTYPE_VEXP = 0x805b - ETHERTYPE_VGLAB = 0x8131 - ETHERTYPE_VINES = 0xbad - ETHERTYPE_VINESECHO = 0xbaf - ETHERTYPE_VINESLOOP = 0xbae - ETHERTYPE_VITAL = 0xff00 - ETHERTYPE_VLAN = 0x8100 - ETHERTYPE_VLTLMAN = 0x8080 - ETHERTYPE_VPROD = 0x805c - ETHERTYPE_VURESERVED = 0x8147 - ETHERTYPE_WATERLOO = 0x8130 - ETHERTYPE_WELLFLEET = 0x8103 - ETHERTYPE_X25 = 0x805 - ETHERTYPE_X75 = 0x801 - ETHERTYPE_XNSSM = 0x9001 - ETHERTYPE_XTP = 0x817d - ETHER_ADDR_LEN = 0x6 - ETHER_ALIGN = 0x2 - ETHER_CRC_LEN = 0x4 - ETHER_CRC_POLY_BE = 0x4c11db6 - ETHER_CRC_POLY_LE = 0xedb88320 - ETHER_HDR_LEN = 0xe - ETHER_MAX_DIX_LEN = 0x600 - ETHER_MAX_LEN = 0x5ee - ETHER_MIN_LEN = 0x40 - ETHER_TYPE_LEN = 0x2 - ETHER_VLAN_ENCAP_LEN = 0x4 - EVFILT_AIO = -0x3 - EVFILT_PROC = -0x5 - EVFILT_READ = -0x1 - EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x7 - EVFILT_TIMER = -0x7 - EVFILT_VNODE = -0x4 - EVFILT_WRITE = -0x2 - EV_ADD = 0x1 - EV_CLEAR = 0x20 - EV_DELETE = 0x2 - EV_DISABLE = 0x8 - EV_ENABLE = 0x4 - EV_EOF = 0x8000 - EV_ERROR = 0x4000 - EV_FLAG1 = 0x2000 - EV_ONESHOT = 0x10 - EV_SYSFLAGS = 0xf000 - EXTA = 0x4b00 - EXTB = 0x9600 - EXTPROC = 0x800 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FLUSHO = 0x800000 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0xa - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLK = 0x7 - F_GETOWN = 0x5 - F_RDLCK = 0x1 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLK = 0x8 - F_SETLKW = 0x9 - F_SETOWN = 0x6 - F_UNLCK = 0x2 - F_WRLCK = 0x3 - HUPCL = 0x4000 - ICANON = 0x100 - ICMP6_FILTER = 0x12 - ICRNL = 0x100 - IEXTEN = 0x400 - IFAN_ARRIVAL = 0x0 - IFAN_DEPARTURE = 0x1 - IFA_ROUTE = 0x1 - IFF_ALLMULTI = 0x200 - IFF_BROADCAST = 0x2 - IFF_CANTCHANGE = 0x8e52 - IFF_DEBUG = 0x4 - IFF_LINK0 = 
0x1000 - IFF_LINK1 = 0x2000 - IFF_LINK2 = 0x4000 - IFF_LOOPBACK = 0x8 - IFF_MULTICAST = 0x8000 - IFF_NOARP = 0x80 - IFF_NOTRAILERS = 0x20 - IFF_OACTIVE = 0x400 - IFF_POINTOPOINT = 0x10 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SIMPLEX = 0x800 - IFF_UP = 0x1 - IFNAMSIZ = 0x10 - IFT_1822 = 0x2 - IFT_A12MPPSWITCH = 0x82 - IFT_AAL2 = 0xbb - IFT_AAL5 = 0x31 - IFT_ADSL = 0x5e - IFT_AFLANE8023 = 0x3b - IFT_AFLANE8025 = 0x3c - IFT_ARAP = 0x58 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ASYNC = 0x54 - IFT_ATM = 0x25 - IFT_ATMDXI = 0x69 - IFT_ATMFUNI = 0x6a - IFT_ATMIMA = 0x6b - IFT_ATMLOGICAL = 0x50 - IFT_ATMRADIO = 0xbd - IFT_ATMSUBINTERFACE = 0x86 - IFT_ATMVCIENDPT = 0xc2 - IFT_ATMVIRTUAL = 0x95 - IFT_BGPPOLICYACCOUNTING = 0xa2 - IFT_BLUETOOTH = 0xf8 - IFT_BRIDGE = 0xd1 - IFT_BSC = 0x53 - IFT_CARP = 0xf7 - IFT_CCTEMUL = 0x3d - IFT_CEPT = 0x13 - IFT_CES = 0x85 - IFT_CHANNEL = 0x46 - IFT_CNR = 0x55 - IFT_COFFEE = 0x84 - IFT_COMPOSITELINK = 0x9b - IFT_DCN = 0x8d - IFT_DIGITALPOWERLINE = 0x8a - IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba - IFT_DLSW = 0x4a - IFT_DOCSCABLEDOWNSTREAM = 0x80 - IFT_DOCSCABLEMACLAYER = 0x7f - IFT_DOCSCABLEUPSTREAM = 0x81 - IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd - IFT_DS0 = 0x51 - IFT_DS0BUNDLE = 0x52 - IFT_DS1FDL = 0xaa - IFT_DS3 = 0x1e - IFT_DTM = 0x8c - IFT_DUMMY = 0xf1 - IFT_DVBASILN = 0xac - IFT_DVBASIOUT = 0xad - IFT_DVBRCCDOWNSTREAM = 0x93 - IFT_DVBRCCMACLAYER = 0x92 - IFT_DVBRCCUPSTREAM = 0x94 - IFT_ECONET = 0xce - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_EPLRS = 0x57 - IFT_ESCON = 0x49 - IFT_ETHER = 0x6 - IFT_FAITH = 0xf3 - IFT_FAST = 0x7d - IFT_FASTETHER = 0x3e - IFT_FASTETHERFX = 0x45 - IFT_FDDI = 0xf - IFT_FIBRECHANNEL = 0x38 - IFT_FRAMERELAYINTERCONNECT = 0x3a - IFT_FRAMERELAYMPI = 0x5c - IFT_FRDLCIENDPT = 0xc1 - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_FRF16MFRBUNDLE = 0xa3 - IFT_FRFORWARD = 0x9e - IFT_G703AT2MB = 0x43 - IFT_G703AT64K = 0x42 - IFT_GIF = 0xf0 - IFT_GIGABITETHERNET = 0x75 - IFT_GR303IDT = 0xb2 - IFT_GR303RDT = 0xb1 - IFT_H323GATEKEEPER = 0xa4 - IFT_H323PROXY = 0xa5 - IFT_HDH1822 = 0x3 - IFT_HDLC = 0x76 - IFT_HDSL2 = 0xa8 - IFT_HIPERLAN2 = 0xb7 - IFT_HIPPI = 0x2f - IFT_HIPPIINTERFACE = 0x39 - IFT_HOSTPAD = 0x5a - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IBM370PARCHAN = 0x48 - IFT_IDSL = 0x9a - IFT_IEEE1394 = 0x90 - IFT_IEEE80211 = 0x47 - IFT_IEEE80212 = 0x37 - IFT_IEEE8023ADLAG = 0xa1 - IFT_IFGSN = 0x91 - IFT_IMT = 0xbe - IFT_INFINIBAND = 0xc7 - IFT_INTERLEAVE = 0x7c - IFT_IP = 0x7e - IFT_IPFORWARD = 0x8e - IFT_IPOVERATM = 0x72 - IFT_IPOVERCDLC = 0x6d - IFT_IPOVERCLAW = 0x6e - IFT_IPSWITCH = 0x4e - IFT_ISDN = 0x3f - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISDNS = 0x4b - IFT_ISDNU = 0x4c - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88025CRFPINT = 0x62 - IFT_ISO88025DTR = 0x56 - IFT_ISO88025FIBER = 0x73 - IFT_ISO88026 = 0xa - IFT_ISUP = 0xb3 - IFT_L2VLAN = 0x87 - IFT_L3IPVLAN = 0x88 - IFT_L3IPXVLAN = 0x89 - IFT_LAPB = 0x10 - IFT_LAPD = 0x4d - IFT_LAPF = 0x77 - IFT_LINEGROUP = 0xd2 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MEDIAMAILOVERIP = 0x8b - IFT_MFSIGLINK = 0xa7 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_MPC = 0x71 - IFT_MPLS = 0xa6 - IFT_MPLSTUNNEL = 0x96 - IFT_MSDSL = 0x8f - IFT_MVL = 0xbf - IFT_MYRINET = 0x63 - IFT_NFAS = 0xaf - IFT_NSIP = 0x1b - IFT_OPTICALCHANNEL = 0xc3 - IFT_OPTICALTRANSPORT = 0xc4 - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PFLOG = 0xf5 - IFT_PFLOW = 0xf9 - IFT_PFSYNC = 0xf6 - IFT_PLC = 0xae - IFT_PON155 = 0xcf - 
IFT_PON622 = 0xd0 - IFT_POS = 0xab - IFT_PPP = 0x17 - IFT_PPPMULTILINKBUNDLE = 0x6c - IFT_PROPATM = 0xc5 - IFT_PROPBWAP2MP = 0xb8 - IFT_PROPCNLS = 0x59 - IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 - IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 - IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 - IFT_PROPMUX = 0x36 - IFT_PROPVIRTUAL = 0x35 - IFT_PROPWIRELESSP2P = 0x9d - IFT_PTPSERIAL = 0x16 - IFT_PVC = 0xf2 - IFT_Q2931 = 0xc9 - IFT_QLLC = 0x44 - IFT_RADIOMAC = 0xbc - IFT_RADSL = 0x5f - IFT_REACHDSL = 0xc0 - IFT_RFC1483 = 0x9f - IFT_RS232 = 0x21 - IFT_RSRB = 0x4f - IFT_SDLC = 0x11 - IFT_SDSL = 0x60 - IFT_SHDSL = 0xa9 - IFT_SIP = 0x1f - IFT_SIPSIG = 0xcc - IFT_SIPTG = 0xcb - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETOVERHEADCHANNEL = 0xb9 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_SRP = 0x97 - IFT_SS7SIGLINK = 0x9c - IFT_STACKTOSTACK = 0x6f - IFT_STARLAN = 0xb - IFT_T1 = 0x12 - IFT_TDLC = 0x74 - IFT_TELINK = 0xc8 - IFT_TERMPAD = 0x5b - IFT_TR008 = 0xb0 - IFT_TRANSPHDLC = 0x7b - IFT_TUNNEL = 0x83 - IFT_ULTRA = 0x1d - IFT_USB = 0xa0 - IFT_V11 = 0x40 - IFT_V35 = 0x2d - IFT_V36 = 0x41 - IFT_V37 = 0x78 - IFT_VDSL = 0x61 - IFT_VIRTUALIPADDRESS = 0x70 - IFT_VIRTUALTG = 0xca - IFT_VOICEDID = 0xd5 - IFT_VOICEEM = 0x64 - IFT_VOICEEMFGD = 0xd3 - IFT_VOICEENCAP = 0x67 - IFT_VOICEFGDEANA = 0xd4 - IFT_VOICEFXO = 0x65 - IFT_VOICEFXS = 0x66 - IFT_VOICEOVERATM = 0x98 - IFT_VOICEOVERCABLE = 0xc6 - IFT_VOICEOVERFRAMERELAY = 0x99 - IFT_VOICEOVERIP = 0x68 - IFT_X213 = 0x5d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25HUNTGROUP = 0x7a - IFT_X25MLP = 0x79 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLASSD_HOST = 0xfffffff - IN_CLASSD_NET = 0xf0000000 - IN_CLASSD_NSHIFT = 0x1c - IN_LOOPBACKNET = 0x7f - IN_RFC3021_HOST = 0x1 - IN_RFC3021_NET = 0xfffffffe - IN_RFC3021_NSHIFT = 0x1f - IPPROTO_AH = 0x33 - IPPROTO_CARP = 0x70 - IPPROTO_DIVERT = 0x102 - IPPROTO_DIVERT_INIT = 0x2 - IPPROTO_DIVERT_RESP = 0x1 - IPPROTO_DONE = 0x101 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_EON = 0x50 - IPPROTO_ESP = 0x32 - IPPROTO_ETHERIP = 0x61 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GGP = 0x3 - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPCOMP = 0x6c - IPPROTO_IPIP = 0x4 - IPPROTO_IPV4 = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MAX = 0x100 - IPPROTO_MAXID = 0x103 - IPPROTO_MOBILE = 0x37 - IPPROTO_MPLS = 0x89 - IPPROTO_NONE = 0x3b - IPPROTO_PFSYNC = 0xf0 - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPV6_AUTH_LEVEL = 0x35 - IPV6_AUTOFLOWLABEL = 0x3b - IPV6_CHECKSUM = 0x1a - IPV6_DEFAULT_MULTICAST_HOPS = 0x1 - IPV6_DEFAULT_MULTICAST_LOOP = 0x1 - IPV6_DEFHLIM = 0x40 - IPV6_DONTFRAG = 0x3e - IPV6_DSTOPTS = 0x32 - IPV6_ESP_NETWORK_LEVEL = 0x37 - IPV6_ESP_TRANS_LEVEL = 0x36 - IPV6_FAITH = 0x1d - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 - IPV6_FRAGTTL = 0x78 - IPV6_HLIMDEC = 0x1 - IPV6_HOPLIMIT = 0x2f - IPV6_HOPOPTS = 0x31 - IPV6_IPCOMP_LEVEL = 0x3c - 
IPV6_JOIN_GROUP = 0xc - IPV6_LEAVE_GROUP = 0xd - IPV6_MAXHLIM = 0xff - IPV6_MAXPACKET = 0xffff - IPV6_MMTU = 0x500 - IPV6_MULTICAST_HOPS = 0xa - IPV6_MULTICAST_IF = 0x9 - IPV6_MULTICAST_LOOP = 0xb - IPV6_NEXTHOP = 0x30 - IPV6_OPTIONS = 0x1 - IPV6_PATHMTU = 0x2c - IPV6_PIPEX = 0x3f - IPV6_PKTINFO = 0x2e - IPV6_PORTRANGE = 0xe - IPV6_PORTRANGE_DEFAULT = 0x0 - IPV6_PORTRANGE_HIGH = 0x1 - IPV6_PORTRANGE_LOW = 0x2 - IPV6_RECVDSTOPTS = 0x28 - IPV6_RECVDSTPORT = 0x40 - IPV6_RECVHOPLIMIT = 0x25 - IPV6_RECVHOPOPTS = 0x27 - IPV6_RECVPATHMTU = 0x2b - IPV6_RECVPKTINFO = 0x24 - IPV6_RECVRTHDR = 0x26 - IPV6_RECVTCLASS = 0x39 - IPV6_RTABLE = 0x1021 - IPV6_RTHDR = 0x33 - IPV6_RTHDRDSTOPTS = 0x23 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_SOCKOPT_RESERVED1 = 0x3 - IPV6_TCLASS = 0x3d - IPV6_UNICAST_HOPS = 0x4 - IPV6_USE_MIN_MTU = 0x2a - IPV6_V6ONLY = 0x1b - IPV6_VERSION = 0x60 - IPV6_VERSION_MASK = 0xf0 - IP_ADD_MEMBERSHIP = 0xc - IP_AUTH_LEVEL = 0x14 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DIVERTFL = 0x1022 - IP_DROP_MEMBERSHIP = 0xd - IP_ESP_NETWORK_LEVEL = 0x16 - IP_ESP_TRANS_LEVEL = 0x15 - IP_HDRINCL = 0x2 - IP_IPCOMP_LEVEL = 0x1d - IP_IPSECFLOWINFO = 0x24 - IP_IPSEC_LOCAL_AUTH = 0x1b - IP_IPSEC_LOCAL_CRED = 0x19 - IP_IPSEC_LOCAL_ID = 0x17 - IP_IPSEC_REMOTE_AUTH = 0x1c - IP_IPSEC_REMOTE_CRED = 0x1a - IP_IPSEC_REMOTE_ID = 0x18 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0xfff - IP_MF = 0x2000 - IP_MINTTL = 0x20 - IP_MIN_MEMBERSHIPS = 0xf - IP_MSS = 0x240 - IP_MULTICAST_IF = 0x9 - IP_MULTICAST_LOOP = 0xb - IP_MULTICAST_TTL = 0xa - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x1 - IP_PIPEX = 0x22 - IP_PORTRANGE = 0x13 - IP_PORTRANGE_DEFAULT = 0x0 - IP_PORTRANGE_HIGH = 0x1 - IP_PORTRANGE_LOW = 0x2 - IP_RECVDSTADDR = 0x7 - IP_RECVDSTPORT = 0x21 - IP_RECVIF = 0x1e - IP_RECVOPTS = 0x5 - IP_RECVRETOPTS = 0x6 - IP_RECVRTABLE = 0x23 - IP_RECVTTL = 0x1f - IP_RETOPTS = 0x8 - IP_RF = 0x8000 - IP_RTABLE = 0x1021 - IP_TOS = 0x3 - IP_TTL = 0x4 - ISIG = 0x80 - ISTRIP = 0x20 - IXANY = 0x800 - IXOFF = 0x400 - IXON = 0x200 - LCNT_OVERLOAD_FLUSH = 0x6 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - MADV_DONTNEED = 0x4 - MADV_FREE = 0x6 - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_SEQUENTIAL = 0x2 - MADV_SPACEAVAIL = 0x5 - MADV_WILLNEED = 0x3 - MAP_ANON = 0x1000 - MAP_ANONYMOUS = 0x1000 - MAP_COPY = 0x2 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FLAGMASK = 0x3ff7 - MAP_HASSEMAPHORE = 0x0 - MAP_INHERIT = 0x0 - MAP_INHERIT_COPY = 0x1 - MAP_INHERIT_NONE = 0x2 - MAP_INHERIT_SHARE = 0x0 - MAP_INHERIT_ZERO = 0x3 - MAP_NOEXTEND = 0x0 - MAP_NORESERVE = 0x0 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x0 - MAP_SHARED = 0x1 - MAP_TRYFIXED = 0x0 - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MSG_BCAST = 0x100 - MSG_CMSG_CLOEXEC = 0x800 - MSG_CTRUNC = 0x20 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x80 - MSG_EOR = 0x8 - MSG_MCAST = 0x200 - MSG_NOSIGNAL = 0x400 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_TRUNC = 0x10 - MSG_WAITALL = 0x40 - MS_ASYNC = 0x1 - MS_INVALIDATE = 0x4 - MS_SYNC = 0x2 - NAME_MAX = 0xff - NET_RT_DUMP = 0x1 - NET_RT_FLAGS = 0x2 - NET_RT_IFLIST = 0x3 - NET_RT_MAXID = 0x6 - NET_RT_STATS = 0x4 - NET_RT_TABLE = 0x5 - NOFLSH = 0x80000000 - NOTE_ATTRIB = 0x8 - NOTE_CHILD = 0x4 - NOTE_DELETE = 0x1 - NOTE_EOF = 0x2 - NOTE_EXEC = 0x20000000 - NOTE_EXIT = 0x80000000 - NOTE_EXTEND = 0x4 - NOTE_FORK = 0x40000000 - NOTE_LINK = 0x10 - NOTE_LOWAT = 0x1 - NOTE_PCTRLMASK = 0xf0000000 - NOTE_PDATAMASK = 0xfffff - NOTE_RENAME = 0x20 - NOTE_REVOKE = 0x40 - 
NOTE_TRACK = 0x1 - NOTE_TRACKERR = 0x2 - NOTE_TRUNCATE = 0x80 - NOTE_WRITE = 0x2 - OCRNL = 0x10 - ONLCR = 0x2 - ONLRET = 0x80 - ONOCR = 0x40 - ONOEOT = 0x8 - OPOST = 0x1 - O_ACCMODE = 0x3 - O_APPEND = 0x8 - O_ASYNC = 0x40 - O_CLOEXEC = 0x10000 - O_CREAT = 0x200 - O_DIRECTORY = 0x20000 - O_DSYNC = 0x80 - O_EXCL = 0x800 - O_EXLOCK = 0x20 - O_FSYNC = 0x80 - O_NDELAY = 0x4 - O_NOCTTY = 0x8000 - O_NOFOLLOW = 0x100 - O_NONBLOCK = 0x4 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x80 - O_SHLOCK = 0x10 - O_SYNC = 0x80 - O_TRUNC = 0x400 - O_WRONLY = 0x1 - PARENB = 0x1000 - PARMRK = 0x8 - PARODD = 0x2000 - PENDIN = 0x20000000 - PF_FLUSH = 0x1 - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROT_EXEC = 0x4 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_NOFILE = 0x8 - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0x7fffffffffffffff - RTAX_AUTHOR = 0x6 - RTAX_BRD = 0x7 - RTAX_DST = 0x0 - RTAX_GATEWAY = 0x1 - RTAX_GENMASK = 0x3 - RTAX_IFA = 0x5 - RTAX_IFP = 0x4 - RTAX_LABEL = 0xa - RTAX_MAX = 0xb - RTAX_NETMASK = 0x2 - RTAX_SRC = 0x8 - RTAX_SRCMASK = 0x9 - RTA_AUTHOR = 0x40 - RTA_BRD = 0x80 - RTA_DST = 0x1 - RTA_GATEWAY = 0x2 - RTA_GENMASK = 0x8 - RTA_IFA = 0x20 - RTA_IFP = 0x10 - RTA_LABEL = 0x400 - RTA_NETMASK = 0x4 - RTA_SRC = 0x100 - RTA_SRCMASK = 0x200 - RTF_ANNOUNCE = 0x4000 - RTF_BLACKHOLE = 0x1000 - RTF_BROADCAST = 0x400000 - RTF_CLONED = 0x10000 - RTF_CLONING = 0x100 - RTF_DONE = 0x40 - RTF_DYNAMIC = 0x10 - RTF_FMASK = 0x70f808 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_LLINFO = 0x400 - RTF_LOCAL = 0x200000 - RTF_MASK = 0x80 - RTF_MODIFIED = 0x20 - RTF_MPATH = 0x40000 - RTF_MPLS = 0x100000 - RTF_PERMANENT_ARP = 0x2000 - RTF_PROTO1 = 0x8000 - RTF_PROTO2 = 0x4000 - RTF_PROTO3 = 0x2000 - RTF_REJECT = 0x8 - RTF_STATIC = 0x800 - RTF_UP = 0x1 - RTF_USETRAILERS = 0x8000 - RTF_XRESOLVE = 0x200 - RTM_ADD = 0x1 - RTM_CHANGE = 0x3 - RTM_DELADDR = 0xd - RTM_DELETE = 0x2 - RTM_DESYNC = 0x10 - RTM_GET = 0x4 - RTM_IFANNOUNCE = 0xf - RTM_IFINFO = 0xe - RTM_LOCK = 0x8 - RTM_LOSING = 0x5 - RTM_MAXSIZE = 0x800 - RTM_MISS = 0x7 - RTM_NEWADDR = 0xc - RTM_REDIRECT = 0x6 - RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 - RTM_VERSION = 0x5 - RTV_EXPIRE = 0x4 - RTV_HOPCOUNT = 0x2 - RTV_MTU = 0x1 - RTV_RPIPE = 0x8 - RTV_RTT = 0x40 - RTV_RTTVAR = 0x80 - RTV_SPIPE = 0x10 - RTV_SSTHRESH = 0x20 - RT_TABLEID_MAX = 0xff - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x4 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDMULTI = 0x80206931 - SIOCAIFADDR = 0x8040691a - SIOCAIFGROUP = 0x80246987 - SIOCALIFADDR = 0x8218691c - SIOCATMARK = 0x40047307 - SIOCBRDGADD = 0x8054693c - SIOCBRDGADDS = 0x80546941 - SIOCBRDGARL = 0x806e694d - SIOCBRDGDADDR = 0x81286947 - SIOCBRDGDEL = 0x8054693d - SIOCBRDGDELS = 0x80546942 - SIOCBRDGFLUSH = 0x80546948 - SIOCBRDGFRL = 0x806e694e - SIOCBRDGGCACHE = 0xc0146941 - SIOCBRDGGFD = 0xc0146952 - SIOCBRDGGHT = 0xc0146951 - SIOCBRDGGIFFLGS = 0xc054693e - SIOCBRDGGMA = 0xc0146953 - SIOCBRDGGPARAM = 0xc03c6958 - SIOCBRDGGPRI = 0xc0146950 - SIOCBRDGGRL = 0xc028694f - SIOCBRDGGSIFS = 0xc054693c - SIOCBRDGGTO = 0xc0146946 - SIOCBRDGIFS = 0xc0546942 - SIOCBRDGRTS = 0xc0186943 - SIOCBRDGSADDR = 0xc1286944 - SIOCBRDGSCACHE = 0x80146940 - SIOCBRDGSFD = 0x80146952 - SIOCBRDGSHT = 0x80146951 - SIOCBRDGSIFCOST = 0x80546955 - SIOCBRDGSIFFLGS = 0x8054693f - SIOCBRDGSIFPRIO = 0x80546954 - SIOCBRDGSMA = 0x80146953 - SIOCBRDGSPRI = 0x80146950 - SIOCBRDGSPROTO = 
0x8014695a - SIOCBRDGSTO = 0x80146945 - SIOCBRDGSTXHC = 0x80146959 - SIOCDELMULTI = 0x80206932 - SIOCDIFADDR = 0x80206919 - SIOCDIFGROUP = 0x80246989 - SIOCDIFPHYADDR = 0x80206949 - SIOCDLIFADDR = 0x8218691e - SIOCGETKALIVE = 0xc01869a4 - SIOCGETLABEL = 0x8020699a - SIOCGETPFLOW = 0xc02069fe - SIOCGETPFSYNC = 0xc02069f8 - SIOCGETSGCNT = 0xc0147534 - SIOCGETVIFCNT = 0xc0147533 - SIOCGETVLAN = 0xc0206990 - SIOCGHIWAT = 0x40047301 - SIOCGIFADDR = 0xc0206921 - SIOCGIFASYNCMAP = 0xc020697c - SIOCGIFBRDADDR = 0xc0206923 - SIOCGIFCONF = 0xc0086924 - SIOCGIFDATA = 0xc020691b - SIOCGIFDESCR = 0xc0206981 - SIOCGIFDSTADDR = 0xc0206922 - SIOCGIFFLAGS = 0xc0206911 - SIOCGIFGATTR = 0xc024698b - SIOCGIFGENERIC = 0xc020693a - SIOCGIFGMEMB = 0xc024698a - SIOCGIFGROUP = 0xc0246988 - SIOCGIFHARDMTU = 0xc02069a5 - SIOCGIFMEDIA = 0xc0286936 - SIOCGIFMETRIC = 0xc0206917 - SIOCGIFMTU = 0xc020697e - SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206948 - SIOCGIFPRIORITY = 0xc020699c - SIOCGIFPSRCADDR = 0xc0206947 - SIOCGIFRDOMAIN = 0xc02069a0 - SIOCGIFRTLABEL = 0xc0206983 - SIOCGIFRXR = 0x802069aa - SIOCGIFTIMESLOT = 0xc0206986 - SIOCGIFXFLAGS = 0xc020699e - SIOCGLIFADDR = 0xc218691d - SIOCGLIFPHYADDR = 0xc218694b - SIOCGLIFPHYRTABLE = 0xc02069a2 - SIOCGLIFPHYTTL = 0xc02069a9 - SIOCGLOWAT = 0x40047303 - SIOCGPGRP = 0x40047309 - SIOCGSPPPPARAMS = 0xc0206994 - SIOCGVH = 0xc02069f6 - SIOCGVNETID = 0xc02069a7 - SIOCIFCREATE = 0x8020697a - SIOCIFDESTROY = 0x80206979 - SIOCIFGCLONERS = 0xc00c6978 - SIOCSETKALIVE = 0x801869a3 - SIOCSETLABEL = 0x80206999 - SIOCSETPFLOW = 0x802069fd - SIOCSETPFSYNC = 0x802069f7 - SIOCSETVLAN = 0x8020698f - SIOCSHIWAT = 0x80047300 - SIOCSIFADDR = 0x8020690c - SIOCSIFASYNCMAP = 0x8020697d - SIOCSIFBRDADDR = 0x80206913 - SIOCSIFDESCR = 0x80206980 - SIOCSIFDSTADDR = 0x8020690e - SIOCSIFFLAGS = 0x80206910 - SIOCSIFGATTR = 0x8024698c - SIOCSIFGENERIC = 0x80206939 - SIOCSIFLLADDR = 0x8020691f - SIOCSIFMEDIA = 0xc0206935 - SIOCSIFMETRIC = 0x80206918 - SIOCSIFMTU = 0x8020697f - SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x80406946 - SIOCSIFPRIORITY = 0x8020699b - SIOCSIFRDOMAIN = 0x8020699f - SIOCSIFRTLABEL = 0x80206982 - SIOCSIFTIMESLOT = 0x80206985 - SIOCSIFXFLAGS = 0x8020699d - SIOCSLIFPHYADDR = 0x8218694a - SIOCSLIFPHYRTABLE = 0x802069a1 - SIOCSLIFPHYTTL = 0x802069a8 - SIOCSLOWAT = 0x80047302 - SIOCSPGRP = 0x80047308 - SIOCSSPPPPARAMS = 0x80206993 - SIOCSVH = 0xc02069f5 - SIOCSVNETID = 0x802069a6 - SOCK_CLOEXEC = 0x8000 - SOCK_DGRAM = 0x2 - SOCK_NONBLOCK = 0x4000 - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_SOCKET = 0xffff - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x2 - SO_BINDANY = 0x1000 - SO_BROADCAST = 0x20 - SO_DEBUG = 0x1 - SO_DONTROUTE = 0x10 - SO_ERROR = 0x1007 - SO_KEEPALIVE = 0x8 - SO_LINGER = 0x80 - SO_NETPROC = 0x1020 - SO_OOBINLINE = 0x100 - SO_PEERCRED = 0x1022 - SO_RCVBUF = 0x1002 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_RTABLE = 0x1021 - SO_SNDBUF = 0x1001 - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_SPLICE = 0x1023 - SO_TIMESTAMP = 0x800 - SO_TYPE = 0x1008 - SO_USELOOPBACK = 0x40 - TCIFLUSH = 0x1 - TCIOFLUSH = 0x3 - TCOFLUSH = 0x2 - TCP_MAXBURST = 0x4 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_SACK = 0x3 - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0x4 - TCP_MSS = 0x200 - TCP_NODELAY = 0x1 - TCP_NOPUSH = 0x10 - TCP_NSTATES = 0xb - TCP_SACK_ENABLE = 0x8 - TCSAFLUSH = 0x2 - TIOCCBRK = 0x2000747a - TIOCCDTR = 0x20007478 - TIOCCONS = 0x80047462 - TIOCDRAIN = 0x2000745e - 
TIOCEXCL = 0x2000740d - TIOCEXT = 0x80047460 - TIOCFLAG_CLOCAL = 0x2 - TIOCFLAG_CRTSCTS = 0x4 - TIOCFLAG_MDMBUF = 0x8 - TIOCFLAG_PPS = 0x10 - TIOCFLAG_SOFTCAR = 0x1 - TIOCFLUSH = 0x80047410 - TIOCGETA = 0x402c7413 - TIOCGETD = 0x4004741a - TIOCGFLAGS = 0x4004745d - TIOCGPGRP = 0x40047477 - TIOCGSID = 0x40047463 - TIOCGTSTAMP = 0x400c745b - TIOCGWINSZ = 0x40087468 - TIOCMBIC = 0x8004746b - TIOCMBIS = 0x8004746c - TIOCMGET = 0x4004746a - TIOCMODG = 0x4004746a - TIOCMODS = 0x8004746d - TIOCMSET = 0x8004746d - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x20007471 - TIOCNXCL = 0x2000740e - TIOCOUTQ = 0x40047473 - TIOCPKT = 0x80047470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCREMOTE = 0x80047469 - TIOCSBRK = 0x2000747b - TIOCSCTTY = 0x20007461 - TIOCSDTR = 0x20007479 - TIOCSETA = 0x802c7414 - TIOCSETAF = 0x802c7416 - TIOCSETAW = 0x802c7415 - TIOCSETD = 0x8004741b - TIOCSFLAGS = 0x8004745c - TIOCSIG = 0x8004745f - TIOCSPGRP = 0x80047476 - TIOCSTART = 0x2000746e - TIOCSTAT = 0x80047465 - TIOCSTI = 0x80017472 - TIOCSTOP = 0x2000746f - TIOCSTSTAMP = 0x8008745a - TIOCSWINSZ = 0x80087467 - TIOCUCNTL = 0x80047466 - TOSTOP = 0x400000 - VDISCARD = 0xf - VDSUSP = 0xb - VEOF = 0x0 - VEOL = 0x1 - VEOL2 = 0x2 - VERASE = 0x3 - VINTR = 0x8 - VKILL = 0x5 - VLNEXT = 0xe - VMIN = 0x10 - VQUIT = 0x9 - VREPRINT = 0x6 - VSTART = 0xc - VSTATUS = 0x12 - VSTOP = 0xd - VSUSP = 0xa - VTIME = 0x11 - VWERASE = 0x4 - WALTSIG = 0x4 - WCONTINUED = 0x8 - WCOREFLAG = 0x80 - WNOHANG = 0x1 - WUNTRACED = 0x2 -) - -// Errors -const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) - EADDRINUSE = syscall.Errno(0x30) - EADDRNOTAVAIL = syscall.Errno(0x31) - EAFNOSUPPORT = syscall.Errno(0x2f) - EAGAIN = syscall.Errno(0x23) - EALREADY = syscall.Errno(0x25) - EAUTH = syscall.Errno(0x50) - EBADF = syscall.Errno(0x9) - EBADRPC = syscall.Errno(0x48) - EBUSY = syscall.Errno(0x10) - ECANCELED = syscall.Errno(0x58) - ECHILD = syscall.Errno(0xa) - ECONNABORTED = syscall.Errno(0x35) - ECONNREFUSED = syscall.Errno(0x3d) - ECONNRESET = syscall.Errno(0x36) - EDEADLK = syscall.Errno(0xb) - EDESTADDRREQ = syscall.Errno(0x27) - EDOM = syscall.Errno(0x21) - EDQUOT = syscall.Errno(0x45) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) - EFTYPE = syscall.Errno(0x4f) - EHOSTDOWN = syscall.Errno(0x40) - EHOSTUNREACH = syscall.Errno(0x41) - EIDRM = syscall.Errno(0x59) - EILSEQ = syscall.Errno(0x54) - EINPROGRESS = syscall.Errno(0x24) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) - EIPSEC = syscall.Errno(0x52) - EISCONN = syscall.Errno(0x38) - EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x5b) - ELOOP = syscall.Errno(0x3e) - EMEDIUMTYPE = syscall.Errno(0x56) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) - EMSGSIZE = syscall.Errno(0x28) - ENAMETOOLONG = syscall.Errno(0x3f) - ENEEDAUTH = syscall.Errno(0x51) - ENETDOWN = syscall.Errno(0x32) - ENETRESET = syscall.Errno(0x34) - ENETUNREACH = syscall.Errno(0x33) - ENFILE = syscall.Errno(0x17) - ENOATTR = syscall.Errno(0x53) - ENOBUFS = syscall.Errno(0x37) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) - ENOLCK = syscall.Errno(0x4d) - 
ENOMEDIUM = syscall.Errno(0x55) - ENOMEM = syscall.Errno(0xc) - ENOMSG = syscall.Errno(0x5a) - ENOPROTOOPT = syscall.Errno(0x2a) - ENOSPC = syscall.Errno(0x1c) - ENOSYS = syscall.Errno(0x4e) - ENOTBLK = syscall.Errno(0xf) - ENOTCONN = syscall.Errno(0x39) - ENOTDIR = syscall.Errno(0x14) - ENOTEMPTY = syscall.Errno(0x42) - ENOTSOCK = syscall.Errno(0x26) - ENOTSUP = syscall.Errno(0x5b) - ENOTTY = syscall.Errno(0x19) - ENXIO = syscall.Errno(0x6) - EOPNOTSUPP = syscall.Errno(0x2d) - EOVERFLOW = syscall.Errno(0x57) - EPERM = syscall.Errno(0x1) - EPFNOSUPPORT = syscall.Errno(0x2e) - EPIPE = syscall.Errno(0x20) - EPROCLIM = syscall.Errno(0x43) - EPROCUNAVAIL = syscall.Errno(0x4c) - EPROGMISMATCH = syscall.Errno(0x4b) - EPROGUNAVAIL = syscall.Errno(0x4a) - EPROTONOSUPPORT = syscall.Errno(0x2b) - EPROTOTYPE = syscall.Errno(0x29) - ERANGE = syscall.Errno(0x22) - EREMOTE = syscall.Errno(0x47) - EROFS = syscall.Errno(0x1e) - ERPCMISMATCH = syscall.Errno(0x49) - ESHUTDOWN = syscall.Errno(0x3a) - ESOCKTNOSUPPORT = syscall.Errno(0x2c) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) - ESTALE = syscall.Errno(0x46) - ETIMEDOUT = syscall.Errno(0x3c) - ETOOMANYREFS = syscall.Errno(0x3b) - ETXTBSY = syscall.Errno(0x1a) - EUSERS = syscall.Errno(0x44) - EWOULDBLOCK = syscall.Errno(0x23) - EXDEV = syscall.Errno(0x12) -) - -// Signals -const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) - SIGBUS = syscall.Signal(0xa) - SIGCHLD = syscall.Signal(0x14) - SIGCONT = syscall.Signal(0x13) - SIGEMT = syscall.Signal(0x7) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINFO = syscall.Signal(0x1d) - SIGINT = syscall.Signal(0x2) - SIGIO = syscall.Signal(0x17) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) - SIGPROF = syscall.Signal(0x1b) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) - SIGSTOP = syscall.Signal(0x11) - SIGSYS = syscall.Signal(0xc) - SIGTERM = syscall.Signal(0xf) - SIGTHR = syscall.Signal(0x20) - SIGTRAP = syscall.Signal(0x5) - SIGTSTP = syscall.Signal(0x12) - SIGTTIN = syscall.Signal(0x15) - SIGTTOU = syscall.Signal(0x16) - SIGURG = syscall.Signal(0x10) - SIGUSR1 = syscall.Signal(0x1e) - SIGUSR2 = syscall.Signal(0x1f) - SIGVTALRM = syscall.Signal(0x1a) - SIGWINCH = syscall.Signal(0x1c) - SIGXCPU = syscall.Signal(0x18) - SIGXFSZ = syscall.Signal(0x19) -) - -// Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in 
progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "connection timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "IPsec processing failure", - 83: "attribute not found", - 84: "illegal byte sequence", - 85: "no medium found", - 86: "wrong medium type", - 87: "value too large to be stored in data type", - 88: "operation canceled", - 89: "identifier removed", - 90: "no message of desired type", - 91: "not supported", -} - -// Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "stopped (signal)", - 18: "stopped", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "thread AST", -} diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 09eedb00935..81e83d78fce 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -664,8 +664,6 @@ const ( MS_OLDSYNC = 0x0 MS_SYNC = 0x4 M_FLUSH = 0x86 - NAME_MAX = 0xff - NEWDEV = 0x1 NL0 = 0x0 NL1 = 0x100 NLDLY = 0x100 @@ -674,9 +672,6 @@ const ( OFDEL = 0x80 OFILL = 0x40 OLCUC = 0x2 - OLDDEV = 0x0 - ONBITSMAJOR = 0x7 - ONBITSMINOR = 0x8 ONLCR = 0x4 ONLRET = 0x20 ONOCR = 0x10 @@ -1110,7 +1105,6 @@ const ( VEOL = 0x5 VEOL2 = 0x6 VERASE = 0x2 - VERASE2 = 0x11 VINTR = 0x0 VKILL = 0x3 VLNEXT = 0xf diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index 10491e9ed32..e48f4a5c1c4 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -1,5 +1,5 @@ // mksyscall.pl -l32 -tags darwin,386 syscall_bsd.go syscall_darwin.go syscall_darwin_386.go -// Code generated by the command above; see README.md. DO NOT EDIT. +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build darwin,386 @@ -266,106 +266,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { @@ -398,16 +298,6 @@ func kill(pid int, signum int, posix int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -566,21 +456,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func 
Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { @@ -611,21 +486,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -636,21 +496,6 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -900,26 +745,6 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -960,13 +785,13 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkdirat(dirfd int, path string, mode uint32) (err error) { +func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } @@ -975,13 +800,13 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkfifo(path string, mode uint32) (err error) { +func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, 
uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } @@ -990,13 +815,14 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1005,14 +831,24 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Open(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } @@ -1021,13 +857,39 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,28 +988,6 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1168,26 +1008,6 @@ func Rename(from string, to string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Renameat(fromfd int, from string, tofd int, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1425,26 +1245,6 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1508,21 +1308,6 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 5f1f6bfef71..672ada0e44f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -1,5 +1,5 @@ // mksyscall.pl -tags darwin,amd64 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build darwin,amd64 @@ -266,106 +266,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { @@ -398,16 +298,6 @@ func kill(pid int, signum int, posix int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -566,21 +456,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { @@ -611,21 +486,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -636,21 +496,6 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -900,26 +745,6 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -960,13 +785,13 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkdirat(dirfd int, path string, mode uint32) (err error) { +func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } @@ -975,13 +800,13 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkfifo(path string, mode uint32) (err error) { +func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } @@ -990,13 +815,14 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1005,14 +831,24 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Open(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } @@ -1021,13 +857,39 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,28 +988,6 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1168,26 +1008,6 @@ func Rename(from string, to string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Renameat(fromfd int, from string, 
tofd int, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1425,26 +1245,6 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1508,21 +1308,6 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1598,6 +1383,21 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) sec = int64(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index 7a40974594f..d516409dbef 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -1,5 +1,5 @@ -// mksyscall.pl -tags darwin,arm syscall_bsd.go syscall_darwin.go syscall_darwin_arm.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
+// mksyscall.pl -l32 -tags darwin,arm syscall_bsd.go syscall_darwin.go syscall_darwin_arm.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build darwin,arm @@ -221,7 +221,7 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } @@ -266,106 +266,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { @@ -398,16 +298,6 @@ func kill(pid int, signum int, posix int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { 
- err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -566,21 +456,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { @@ -611,21 +486,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -636,21 +496,6 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -900,26 +745,6 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -960,13 +785,13 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkdirat(dirfd int, path string, mode uint32) (err error) { +func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } @@ -975,13 +800,13 @@ func Mkdirat(dirfd int, path string, 
mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkfifo(path string, mode uint32) (err error) { +func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } @@ -990,13 +815,14 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1005,14 +831,24 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Open(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } @@ -1021,13 +857,39 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,28 +988,6 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 
0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1168,26 +1008,6 @@ func Rename(from string, to string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Renameat(fromfd int, from string, tofd int, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1425,26 +1245,6 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1508,21 +1308,6 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 07c6ebc9f4c..e97759c3575 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -1,5 +1,5 @@ // mksyscall.pl -tags darwin,arm64 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build darwin,arm64 @@ -266,106 +266,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { @@ -398,16 +298,6 @@ func kill(pid int, signum int, posix int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -566,21 +456,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { @@ -611,21 +486,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -636,21 +496,6 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -900,26 +745,6 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -960,13 +785,13 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkdirat(dirfd int, path string, mode uint32) (err error) { +func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } @@ -975,13 +800,13 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkfifo(path string, mode uint32) (err error) { +func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } @@ -990,13 +815,14 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1005,14 +831,24 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Open(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } @@ -1021,13 +857,39 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,28 +988,6 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1168,26 +1008,6 @@ func Rename(from string, to string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Renameat(fromfd int, from string, 
tofd int, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1425,26 +1245,6 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1508,21 +1308,6 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 7fa205cd03b..eafceb8e854 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -266,106 +266,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (r int, w int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) r = int(r0) @@ -929,6 +829,74 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1423,18 +1391,3 @@ func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 
1a0bb4cb0e2..f53801ceef9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -1,5 +1,5 @@ // mksyscall.pl -l32 -tags freebsd,386 syscall_bsd.go syscall_freebsd.go syscall_freebsd_386.go -// Code generated by the command above; see README.md. DO NOT EDIT. +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build freebsd,386 @@ -266,106 +266,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (r int, w int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) r = int(r0) @@ -378,16 +278,6 @@ func pipe() (r int, w int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -413,36 +303,6 @@ func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func CapEnter() (err error) { - _, _, e1 := 
Syscall(SYS_CAP_ENTER, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func capRightsGet(version int, fd int, rightsp *CapRights) (err error) { - _, _, e1 := Syscall(SYS___CAP_RIGHTS_GET, uintptr(version), uintptr(fd), uintptr(unsafe.Pointer(rightsp))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func capRightsLimit(fd int, rightsp *CapRights) (err error) { - _, _, e1 := Syscall(SYS_CAP_RIGHTS_LIMIT, uintptr(fd), uintptr(unsafe.Pointer(rightsp)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -780,21 +640,6 @@ func Fadvise(fd int, offset int64, length int64, advice int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { @@ -825,21 +670,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -850,21 +680,6 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -1134,26 +949,6 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listen(s int, backlog int) (err error) { _, _, e1 
:= Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -1194,13 +989,13 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkdirat(dirfd int, path string, mode uint32) (err error) { +func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1209,13 +1004,13 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkfifo(path string, mode uint32) (err error) { +func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } @@ -1224,13 +1019,14 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1239,8 +1035,8 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1249,14 +1045,50 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Open(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1265,13 +1097,13 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) { +func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(fdat), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1370,28 +1202,6 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1412,26 +1222,6 @@ func Rename(from string, to string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Renameat(fromfd int, from string, tofd int, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1679,26 +1469,6 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1762,21 +1532,6 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT - func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1860,18 +1615,3 @@ func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index ac1e8e01364..55b07412cbd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -1,5 +1,5 @@ // mksyscall.pl -tags freebsd,amd64 syscall_bsd.go syscall_freebsd.go syscall_freebsd_amd64.go -// Code generated by the command above; see README.md. DO NOT EDIT. +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build freebsd,amd64 @@ -266,106 +266,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - func pipe() (r int, w int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) r = int(r0) @@ -378,16 +278,6 @@ func pipe() (r int, w int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -413,36 +303,6 @@ func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func CapEnter() (err error) { - _, _, e1 := Syscall(SYS_CAP_ENTER, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func capRightsGet(version int, fd int, rightsp *CapRights) (err error) { - _, _, e1 := Syscall(SYS___CAP_RIGHTS_GET, uintptr(version), uintptr(fd), uintptr(unsafe.Pointer(rightsp))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func capRightsLimit(fd int, rightsp *CapRights) (err error) { - _, _, e1 := Syscall(SYS_CAP_RIGHTS_LIMIT, uintptr(fd), uintptr(unsafe.Pointer(rightsp)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -780,21 +640,6 @@ func Fadvise(fd int, offset int64, length int64, advice int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { @@ -825,21 +670,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -850,21 +680,6 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Flock(fd int, how int) (err 
error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -1134,26 +949,6 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -1194,13 +989,13 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkdirat(dirfd int, path string, mode uint32) (err error) { +func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1209,13 +1004,13 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkfifo(path string, mode uint32) (err error) { +func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } @@ -1224,13 +1019,14 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1239,8 +1035,8 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1249,14 +1045,50 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Open(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_OPEN, 
uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1265,13 +1097,13 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) { +func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(fdat), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1370,28 +1202,6 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1412,26 +1222,6 @@ func Rename(from string, to string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Renameat(fromfd int, from string, tofd int, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1679,26 +1469,6 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err 
!= nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1762,21 +1532,6 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1860,18 +1615,3 @@ func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 2b4e6acf042..0e9b42bf4fe 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -1,5 +1,5 @@ // mksyscall.pl -l32 -arm -tags freebsd,arm syscall_bsd.go syscall_freebsd.go syscall_freebsd_arm.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build freebsd,arm @@ -266,106 +266,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (r int, w int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) r = int(r0) @@ -378,16 +278,6 @@ func pipe() (r int, w int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -413,36 +303,6 @@ func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func CapEnter() (err error) { - _, _, e1 := Syscall(SYS_CAP_ENTER, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func capRightsGet(version int, fd int, rightsp *CapRights) (err error) { - _, _, e1 := Syscall(SYS___CAP_RIGHTS_GET, uintptr(version), uintptr(fd), 
uintptr(unsafe.Pointer(rightsp))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func capRightsLimit(fd int, rightsp *CapRights) (err error) { - _, _, e1 := Syscall(SYS_CAP_RIGHTS_LIMIT, uintptr(fd), uintptr(unsafe.Pointer(rightsp)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -780,21 +640,6 @@ func Fadvise(fd int, offset int64, length int64, advice int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { @@ -825,21 +670,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -850,21 +680,6 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -1134,26 +949,6 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { @@ -1194,13 +989,13 @@ func Mkdir(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkdirat(dirfd int, path string, mode uint32) (err error) { +func Mkfifo(path string, mode 
uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1209,13 +1004,13 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkfifo(path string, mode uint32) (err error) { +func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } @@ -1224,13 +1019,14 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1239,8 +1035,8 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1249,14 +1045,50 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Open(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1265,13 +1097,13 @@ func Open(path string, mode int, perm uint32) (fd int, 
err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) { +func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(fdat), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1370,28 +1202,6 @@ func Readlink(path string, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1412,26 +1222,6 @@ func Rename(from string, to string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Renameat(fromfd int, from string, tofd int, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1679,26 +1469,6 @@ func Symlink(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { @@ -1762,21 +1532,6 @@ func Unlink(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1860,18 +1615,3 @@ func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
- -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 000a46833f1..d71acc17ebe 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -14,31 +14,6 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) @@ -526,17 +501,6 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -589,6 +553,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -838,33 +817,6 @@ func Klogctl(typ int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listxattr(path string, dest []byte) (sz int, err error) { var _p0 *byte _p0, err = 
BytePtrFromString(path) @@ -887,74 +839,6 @@ func Listxattr(path string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1234,16 +1118,6 @@ func Sync() { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Sysinfo(info *Sysinfo_t) (err error) { _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) if e1 != 0 { @@ -1446,24 +1320,14 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1336,8 @@ func Msync(b []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } 
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 712dffde4ea..62823119cfd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -14,31 +14,6 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) @@ -526,17 +501,6 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -589,6 +553,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -838,33 +817,6 @@ func Klogctl(typ int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listxattr(path string, dest []byte) (sz int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -887,74 +839,6 @@ func Listxattr(path string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if 
len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1234,16 +1118,6 @@ func Sync() { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Sysinfo(info *Sysinfo_t) (err error) { _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) if e1 != 0 { @@ -1446,24 +1320,14 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1336,8 @@ func Msync(b []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 338796d909d..d48e2067ef3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -14,31 +14,6 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) @@ -526,17 +501,6 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -589,6 +553,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -838,33 +817,6 @@ func Klogctl(typ int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listxattr(path string, dest []byte) (sz int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -887,74 +839,6 @@ func Listxattr(path string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, 
attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1234,16 +1118,6 @@ func Sync() { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Sysinfo(info *Sysinfo_t) (err error) { _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) if e1 != 0 { @@ -1446,24 +1320,14 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1336,8 @@ func Msync(b []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index fc3006d97ca..1ceabe5fadb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -14,31 +14,6 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) @@ -526,17 +501,6 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -589,6 +553,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -838,33 +817,6 @@ func Klogctl(typ int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listxattr(path string, dest []byte) (sz int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -887,74 +839,6 @@ func Listxattr(path string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - 
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1234,16 +1118,6 @@ func Sync() { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Sysinfo(info *Sysinfo_t) (err error) { _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) if e1 != 0 { @@ -1446,24 +1320,14 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1336,8 @@ func Msync(b []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 4b0ef20762f..fd3e9721d87 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -14,31 +14,6 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, 
flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) @@ -526,17 +501,6 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -589,6 +553,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -838,33 +817,6 @@ func Klogctl(typ int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listxattr(path string, dest []byte) (sz int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -887,74 +839,6 @@ func Listxattr(path string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 
{ - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1234,16 +1118,6 @@ func Sync() { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Sysinfo(info *Sysinfo_t) (err error) { _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) if e1 != 0 { @@ -1446,24 +1320,14 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1336,8 @@ func Msync(b []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 31eb98c7d9d..dfa9d930bd0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -14,31 +14,6 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) @@ -526,17 +501,6 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), 
uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -589,6 +553,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -838,33 +817,6 @@ func Klogctl(typ int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listxattr(path string, dest []byte) (sz int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -887,74 +839,6 @@ func Listxattr(path string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
- func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1234,16 +1118,6 @@ func Sync() { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Sysinfo(info *Sysinfo_t) (err error) { _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) if e1 != 0 { @@ -1446,24 +1320,14 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1336,8 @@ func Msync(b []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 875ffa33f59..2c41e34e2bf 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -14,31 +14,6 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) @@ -526,17 +501,6 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -589,6 +553,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -838,33 +817,6 @@ func Klogctl(typ int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listxattr(path string, dest []byte) (sz int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -887,74 +839,6 @@ func Listxattr(path string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1234,16 +1118,6 @@ func Sync() { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - 
return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Sysinfo(info *Sysinfo_t) (err error) { _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) if e1 != 0 { @@ -1446,24 +1320,14 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1336,8 @@ func Msync(b []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 6863e81aa6e..da9c71f4c82 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -14,31 +14,6 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) @@ -526,17 +501,6 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -589,6 +553,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -838,33 +817,6 @@ func Klogctl(typ int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listxattr(path string, dest []byte) (sz int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -887,74 +839,6 @@ func Listxattr(path string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1234,16 +1118,6 @@ func Sync() { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Sysinfo(info *Sysinfo_t) (err error) { _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) if e1 != 0 { @@ -1446,24 +1320,14 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags 
int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1336,8 @@ func Msync(b []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 39eacd630dc..170560a3bed 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -14,31 +14,6 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) @@ -526,17 +501,6 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -589,6 +553,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -838,33 +817,6 @@ func Klogctl(typ int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lgetxattr(path 
string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listxattr(path string, dest []byte) (sz int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -887,74 +839,6 @@ func Listxattr(path string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1234,16 +1118,6 @@ func Sync() { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Sysinfo(info *Sysinfo_t) (err error) { _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) if e1 != 0 { @@ -1446,24 +1320,14 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } 
else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1336,8 @@ func Msync(b []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 2a79746bf51..e03a81c56a9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -14,31 +14,6 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) @@ -526,17 +501,6 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -589,6 +553,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -838,33 +817,6 @@ func Klogctl(typ int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } 
- r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listxattr(path string, dest []byte) (sz int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -887,74 +839,6 @@ func Listxattr(path string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1234,16 +1118,6 @@ func Sync() { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Sysinfo(info *Sysinfo_t) (err error) { _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) if e1 != 0 { @@ -1446,24 +1320,14 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1336,8 @@ func Msync(b []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 49021966f08..e6c13aa7a32 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -14,31 +14,6 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) @@ -526,17 +501,6 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -589,6 +553,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -838,33 +817,6 @@ func Klogctl(typ int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listxattr(path string, dest []byte) (sz int, err error) { var _p0 *byte _p0, 
err = BytePtrFromString(path) @@ -887,74 +839,6 @@ func Listxattr(path string, dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1234,16 +1118,6 @@ func Sync() { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Sysinfo(info *Sysinfo_t) (err error) { _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) if e1 != 0 { @@ -1446,24 +1320,14 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1336,8 @@ func Msync(b []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) 
} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index db99fd0c992..3182345ece7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -1,5 +1,5 @@ // mksyscall.pl -l32 -netbsd -tags netbsd,386 syscall_bsd.go syscall_netbsd.go syscall_netbsd_386.go -// Code generated by the command above; see README.md. DO NOT EDIT. +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build netbsd,386 @@ -266,106 +266,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (fd1 int, fd2 int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) fd1 = int(r0) @@ -877,6 +777,74 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 
:= Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1329,18 +1297,3 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 7b6c2c87e6e..74ba8189a57 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -1,5 +1,5 @@ // mksyscall.pl -netbsd -tags netbsd,amd64 syscall_bsd.go syscall_netbsd.go syscall_netbsd_amd64.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build netbsd,amd64 @@ -266,106 +266,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (fd1 int, fd2 int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) fd1 = int(r0) @@ -877,6 +777,74 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } 
+ return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1329,18 +1297,3 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 0f4cc3b5284..1f346e2f528 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -1,5 +1,5 @@ -// mksyscall.pl -l32 -netbsd -arm -tags netbsd,arm syscall_bsd.go syscall_netbsd.go syscall_netbsd_arm.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
+// mksyscall.pl -l32 -arm -tags netbsd,arm syscall_bsd.go syscall_netbsd.go syscall_netbsd_arm.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build netbsd,arm @@ -266,106 +266,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (fd1 int, fd2 int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) fd1 = int(r0) @@ -877,6 +777,74 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := 
Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1329,18 +1297,3 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 7baea87c7b0..ca3e813926b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1,5 +1,5 @@ // mksyscall.pl -l32 -openbsd -tags openbsd,386 syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build openbsd,386 @@ -266,106 +266,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -885,6 +785,74 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1387,18 +1355,3 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 0d69ce6b521..bf63d552ed8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1,5 +1,5 @@ // mksyscall.pl -openbsd -tags openbsd,amd64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build openbsd,amd64 @@ -266,106 +266,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -885,6 +785,74 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1387,18 +1355,3 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go deleted file mode 100644 index 41572c26e41..00000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ /dev/null @@ -1,1404 +0,0 @@ -// mksyscall.pl -l32 -openbsd -arm -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build openbsd,arm - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT - -func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimes(path string, timeval *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := 
Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, behav int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe(p *[2]_C_int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Access(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { 
- err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chflags(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chmod(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) - nfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
- -func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) - egid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) - gid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) - pgrp = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - 
-func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) - tainted = bool(r0 != 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lchown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Link(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdir(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkfifo(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) 
- } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Open(path string, mode int, perm uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pathconf(path string, name int) (val int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlink(path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rename(from string, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), 
uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Revoke(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rmdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) - newoffset = int64(int64(r1)<<32 | int64(r0)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setlogin(name string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := 
RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statfs(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlink(path string, link string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Truncate(path string, length int64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlink(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) - ret = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 98b26655006..cbc6f6e3a35 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -25,9 +25,6 @@ import ( //go:cgo_import_dynamic libc___xnet_recvmsg __xnet_recvmsg "libsocket.so" //go:cgo_import_dynamic libc___xnet_sendmsg __xnet_sendmsg "libsocket.so" //go:cgo_import_dynamic libc_acct acct "libc.so" -//go:cgo_import_dynamic libc___makedev __makedev "libc.so" -//go:cgo_import_dynamic libc___major __major "libc.so" -//go:cgo_import_dynamic libc___minor __minor "libc.so" //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" //go:cgo_import_dynamic libc_access access "libc.so" //go:cgo_import_dynamic libc_adjtime adjtime "libc.so" @@ -46,7 +43,6 @@ import ( //go:cgo_import_dynamic libc_fchown fchown "libc.so" //go:cgo_import_dynamic libc_fchownat fchownat "libc.so" //go:cgo_import_dynamic libc_fdatasync fdatasync "libc.so" -//go:cgo_import_dynamic libc_flock flock "libc.so" //go:cgo_import_dynamic libc_fpathconf fpathconf 
"libc.so" //go:cgo_import_dynamic libc_fstat fstat "libc.so" //go:cgo_import_dynamic libc_fstatvfs fstatvfs "libc.so" @@ -78,7 +74,6 @@ import ( //go:cgo_import_dynamic libc_mlock mlock "libc.so" //go:cgo_import_dynamic libc_mlockall mlockall "libc.so" //go:cgo_import_dynamic libc_mprotect mprotect "libc.so" -//go:cgo_import_dynamic libc_msync msync "libc.so" //go:cgo_import_dynamic libc_munlock munlock "libc.so" //go:cgo_import_dynamic libc_munlockall munlockall "libc.so" //go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" @@ -133,6 +128,7 @@ import ( //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" +//go:cgo_import_dynamic libc_sysconf sysconf "libc.so" //go:linkname procpipe libc_pipe //go:linkname procgetsockname libc_getsockname @@ -149,9 +145,6 @@ import ( //go:linkname proc__xnet_recvmsg libc___xnet_recvmsg //go:linkname proc__xnet_sendmsg libc___xnet_sendmsg //go:linkname procacct libc_acct -//go:linkname proc__makedev libc___makedev -//go:linkname proc__major libc___major -//go:linkname proc__minor libc___minor //go:linkname procioctl libc_ioctl //go:linkname procAccess libc_access //go:linkname procAdjtime libc_adjtime @@ -170,7 +163,6 @@ import ( //go:linkname procFchown libc_fchown //go:linkname procFchownat libc_fchownat //go:linkname procFdatasync libc_fdatasync -//go:linkname procFlock libc_flock //go:linkname procFpathconf libc_fpathconf //go:linkname procFstat libc_fstat //go:linkname procFstatvfs libc_fstatvfs @@ -202,7 +194,6 @@ import ( //go:linkname procMlock libc_mlock //go:linkname procMlockall libc_mlockall //go:linkname procMprotect libc_mprotect -//go:linkname procMsync libc_msync //go:linkname procMunlock libc_munlock //go:linkname procMunlockall libc_munlockall //go:linkname procNanosleep libc_nanosleep @@ -257,6 +248,7 @@ import ( //go:linkname procgetpeername libc_getpeername //go:linkname procsetsockopt libc_setsockopt //go:linkname procrecvfrom libc_recvfrom +//go:linkname procsysconf libc_sysconf var ( procpipe, @@ -274,9 +266,6 @@ var ( proc__xnet_recvmsg, proc__xnet_sendmsg, procacct, - proc__makedev, - proc__major, - proc__minor, procioctl, procAccess, procAdjtime, @@ -295,7 +284,6 @@ var ( procFchown, procFchownat, procFdatasync, - procFlock, procFpathconf, procFstat, procFstatvfs, @@ -327,7 +315,6 @@ var ( procMlock, procMlockall, procMprotect, - procMsync, procMunlock, procMunlockall, procNanosleep, @@ -381,7 +368,8 @@ var ( proc__xnet_getsockopt, procgetpeername, procsetsockopt, - procrecvfrom syscallFunc + procrecvfrom, + procsysconf syscallFunc ) func pipe(p *[2]_C_int) (n int, err error) { @@ -531,25 +519,7 @@ func acct(path *byte) (err error) { return } -func __makedev(version int, major uint, minor uint) (val uint64) { - r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__makedev)), 3, uintptr(version), uintptr(major), uintptr(minor), 0, 0, 0) - val = uint64(r0) - return -} - -func __major(version int, dev uint64) (val uint) { - r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__major)), 2, uintptr(version), uintptr(dev), 0, 0, 0, 0) - val = uint(r0) - return -} - -func __minor(version int, dev uint64) (val uint) { - r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__minor)), 2, uintptr(version), uintptr(dev), 0, 0, 0, 0) - val = uint(r0) - return -} - -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { _, _, e1 := 
sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) if e1 != 0 { err = e1 @@ -732,14 +702,6 @@ func Fdatasync(fd int) (err error) { return } -func Flock(fd int, how int) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFlock)), 2, uintptr(fd), uintptr(how), 0, 0, 0, 0) - if e1 != 0 { - err = e1 - } - return -} - func Fpathconf(fd int, name int) (val int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFpathconf)), 2, uintptr(fd), uintptr(name), 0, 0, 0, 0) val = int(r0) @@ -1047,18 +1009,6 @@ func Mprotect(b []byte, prot int) (err error) { return } -func Msync(b []byte, flags int) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMsync)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(flags), 0, 0, 0) - if e1 != 0 { - err = e1 - } - return -} - func Munlock(b []byte) (err error) { var _p0 *byte if len(b) > 0 { @@ -1628,3 +1578,12 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } return } + +func sysconf(name int) (n int64, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsysconf)), 1, uintptr(name), 0, 0, 0, 0, 0) + n = int64(r0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go index 41cb6ed3994..b8c9aea852f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go @@ -1,5 +1,5 @@ -// mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS10.2.sdk/usr/include/sys/syscall.h -// Code generated by the command above; see README.md. DO NOT EDIT. 
+// mksysnum_darwin.pl /usr/include/sys/syscall.h +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT // +build arm,darwin @@ -121,14 +121,12 @@ const ( SYS_CSOPS = 169 SYS_CSOPS_AUDITTOKEN = 170 SYS_WAITID = 173 - SYS_KDEBUG_TYPEFILTER = 177 - SYS_KDEBUG_TRACE_STRING = 178 - SYS_KDEBUG_TRACE64 = 179 SYS_KDEBUG_TRACE = 180 SYS_SETGID = 181 SYS_SETEGID = 182 SYS_SETEUID = 183 SYS_SIGRETURN = 184 + SYS_CHUD = 185 SYS_FDATASYNC = 187 SYS_STAT = 188 SYS_FSTAT = 189 @@ -142,10 +140,17 @@ const ( SYS_LSEEK = 199 SYS_TRUNCATE = 200 SYS_FTRUNCATE = 201 - SYS_SYSCTL = 202 + SYS___SYSCTL = 202 SYS_MLOCK = 203 SYS_MUNLOCK = 204 SYS_UNDELETE = 205 + SYS_ATSOCKET = 206 + SYS_ATGETMSG = 207 + SYS_ATPUTMSG = 208 + SYS_ATPSNDREQ = 209 + SYS_ATPSNDRSP = 210 + SYS_ATPGETREQ = 211 + SYS_ATPGETRSP = 212 SYS_OPEN_DPROTECTED_NP = 216 SYS_GETATTRLIST = 220 SYS_SETATTRLIST = 221 @@ -197,7 +202,9 @@ const ( SYS_SEM_WAIT = 271 SYS_SEM_TRYWAIT = 272 SYS_SEM_POST = 273 - SYS_SYSCTLBYNAME = 274 + SYS_SEM_GETVALUE = 274 + SYS_SEM_INIT = 275 + SYS_SEM_DESTROY = 276 SYS_OPEN_EXTENDED = 277 SYS_UMASK_EXTENDED = 278 SYS_STAT_EXTENDED = 279 @@ -279,6 +286,7 @@ const ( SYS_KQUEUE = 362 SYS_KEVENT = 363 SYS_LCHOWN = 364 + SYS_STACK_SNAPSHOT = 365 SYS_BSDTHREAD_REGISTER = 366 SYS_WORKQ_OPEN = 367 SYS_WORKQ_KERNRETURN = 368 @@ -287,7 +295,6 @@ const ( SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 SYS_THREAD_SELFID = 372 SYS_LEDGER = 373 - SYS_KEVENT_QOS = 374 SYS___MAC_EXECVE = 380 SYS___MAC_SYSCALL = 381 SYS___MAC_GET_FILE = 382 @@ -299,8 +306,11 @@ const ( SYS___MAC_GET_FD = 388 SYS___MAC_SET_FD = 389 SYS___MAC_GET_PID = 390 - SYS_PSELECT = 394 - SYS_PSELECT_NOCANCEL = 395 + SYS___MAC_GET_LCID = 391 + SYS___MAC_GET_LCTX = 392 + SYS___MAC_SET_LCTX = 393 + SYS_SETLCID = 394 + SYS_GETLCID = 395 SYS_READ_NOCANCEL = 396 SYS_WRITE_NOCANCEL = 397 SYS_OPEN_NOCANCEL = 398 @@ -344,83 +354,5 @@ const ( SYS_PID_SHUTDOWN_SOCKETS = 436 SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 SYS_KAS_INFO = 439 - SYS_MEMORYSTATUS_CONTROL = 440 - SYS_GUARDED_OPEN_NP = 441 - SYS_GUARDED_CLOSE_NP = 442 - SYS_GUARDED_KQUEUE_NP = 443 - SYS_CHANGE_FDGUARD_NP = 444 - SYS_USRCTL = 445 - SYS_PROC_RLIMIT_CONTROL = 446 - SYS_CONNECTX = 447 - SYS_DISCONNECTX = 448 - SYS_PEELOFF = 449 - SYS_SOCKET_DELEGATE = 450 - SYS_TELEMETRY = 451 - SYS_PROC_UUID_POLICY = 452 - SYS_MEMORYSTATUS_GET_LEVEL = 453 - SYS_SYSTEM_OVERRIDE = 454 - SYS_VFS_PURGE = 455 - SYS_SFI_CTL = 456 - SYS_SFI_PIDCTL = 457 - SYS_COALITION = 458 - SYS_COALITION_INFO = 459 - SYS_NECP_MATCH_POLICY = 460 - SYS_GETATTRLISTBULK = 461 - SYS_CLONEFILEAT = 462 - SYS_OPENAT = 463 - SYS_OPENAT_NOCANCEL = 464 - SYS_RENAMEAT = 465 - SYS_FACCESSAT = 466 - SYS_FCHMODAT = 467 - SYS_FCHOWNAT = 468 - SYS_FSTATAT = 469 - SYS_FSTATAT64 = 470 - SYS_LINKAT = 471 - SYS_UNLINKAT = 472 - SYS_READLINKAT = 473 - SYS_SYMLINKAT = 474 - SYS_MKDIRAT = 475 - SYS_GETATTRLISTAT = 476 - SYS_PROC_TRACE_LOG = 477 - SYS_BSDTHREAD_CTL = 478 - SYS_OPENBYID_NP = 479 - SYS_RECVMSG_X = 480 - SYS_SENDMSG_X = 481 - SYS_THREAD_SELFUSAGE = 482 - SYS_CSRCTL = 483 - SYS_GUARDED_OPEN_DPROTECTED_NP = 484 - SYS_GUARDED_WRITE_NP = 485 - SYS_GUARDED_PWRITE_NP = 486 - SYS_GUARDED_WRITEV_NP = 487 - SYS_RENAMEATX_NP = 488 - SYS_MREMAP_ENCRYPTED = 489 - SYS_NETAGENT_TRIGGER = 490 - SYS_STACK_SNAPSHOT_WITH_CONFIG = 491 - SYS_MICROSTACKSHOT = 492 - SYS_GRAB_PGO_DATA = 493 - SYS_PERSONA = 494 - SYS_WORK_INTERVAL_CTL = 499 - SYS_GETENTROPY = 500 - SYS_NECP_OPEN = 501 - SYS_NECP_CLIENT_ACTION = 502 - SYS___NEXUS_OPEN = 503 - SYS___NEXUS_REGISTER = 504 - 
SYS___NEXUS_DEREGISTER = 505 - SYS___NEXUS_CREATE = 506 - SYS___NEXUS_DESTROY = 507 - SYS___NEXUS_GET_OPT = 508 - SYS___NEXUS_SET_OPT = 509 - SYS___CHANNEL_OPEN = 510 - SYS___CHANNEL_GET_INFO = 511 - SYS___CHANNEL_SYNC = 512 - SYS___CHANNEL_GET_OPT = 513 - SYS___CHANNEL_SET_OPT = 514 - SYS_ULOCK_WAIT = 515 - SYS_ULOCK_WAKE = 516 - SYS_FCLONEFILEAT = 517 - SYS_FS_SNAPSHOT = 518 - SYS_TERMINATE_WITH_PAYLOAD = 520 - SYS_ABORT_WITH_PAYLOAD = 521 - SYS_MAXSYSCALL = 522 - SYS_INVALID = 63 + SYS_MAXSYSCALL = 440 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go index 075816c3488..26677ebbf5b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go @@ -1,5 +1,5 @@ -// mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS10.2.sdk/usr/include/sys/syscall.h -// Code generated by the command above; see README.md. DO NOT EDIT. +// mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS8.4.sdk/usr/include/sys/syscall.h +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT // +build arm64,darwin @@ -121,14 +121,13 @@ const ( SYS_CSOPS = 169 SYS_CSOPS_AUDITTOKEN = 170 SYS_WAITID = 173 - SYS_KDEBUG_TYPEFILTER = 177 - SYS_KDEBUG_TRACE_STRING = 178 SYS_KDEBUG_TRACE64 = 179 SYS_KDEBUG_TRACE = 180 SYS_SETGID = 181 SYS_SETEGID = 182 SYS_SETEUID = 183 SYS_SIGRETURN = 184 + SYS_CHUD = 185 SYS_FDATASYNC = 187 SYS_STAT = 188 SYS_FSTAT = 189 @@ -279,6 +278,7 @@ const ( SYS_KQUEUE = 362 SYS_KEVENT = 363 SYS_LCHOWN = 364 + SYS_STACK_SNAPSHOT = 365 SYS_BSDTHREAD_REGISTER = 366 SYS_WORKQ_OPEN = 367 SYS_WORKQ_KERNRETURN = 368 @@ -287,7 +287,6 @@ const ( SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 SYS_THREAD_SELFID = 372 SYS_LEDGER = 373 - SYS_KEVENT_QOS = 374 SYS___MAC_EXECVE = 380 SYS___MAC_SYSCALL = 381 SYS___MAC_GET_FILE = 382 @@ -299,8 +298,11 @@ const ( SYS___MAC_GET_FD = 388 SYS___MAC_SET_FD = 389 SYS___MAC_GET_PID = 390 - SYS_PSELECT = 394 - SYS_PSELECT_NOCANCEL = 395 + SYS___MAC_GET_LCID = 391 + SYS___MAC_GET_LCTX = 392 + SYS___MAC_SET_LCTX = 393 + SYS_SETLCID = 394 + SYS_GETLCID = 395 SYS_READ_NOCANCEL = 396 SYS_WRITE_NOCANCEL = 397 SYS_OPEN_NOCANCEL = 398 @@ -349,7 +351,6 @@ const ( SYS_GUARDED_CLOSE_NP = 442 SYS_GUARDED_KQUEUE_NP = 443 SYS_CHANGE_FDGUARD_NP = 444 - SYS_USRCTL = 445 SYS_PROC_RLIMIT_CONTROL = 446 SYS_CONNECTX = 447 SYS_DISCONNECTX = 448 @@ -366,7 +367,6 @@ const ( SYS_COALITION_INFO = 459 SYS_NECP_MATCH_POLICY = 460 SYS_GETATTRLISTBULK = 461 - SYS_CLONEFILEAT = 462 SYS_OPENAT = 463 SYS_OPENAT_NOCANCEL = 464 SYS_RENAMEAT = 465 @@ -392,35 +392,7 @@ const ( SYS_GUARDED_WRITE_NP = 485 SYS_GUARDED_PWRITE_NP = 486 SYS_GUARDED_WRITEV_NP = 487 - SYS_RENAMEATX_NP = 488 + SYS_RENAME_EXT = 488 SYS_MREMAP_ENCRYPTED = 489 - SYS_NETAGENT_TRIGGER = 490 - SYS_STACK_SNAPSHOT_WITH_CONFIG = 491 - SYS_MICROSTACKSHOT = 492 - SYS_GRAB_PGO_DATA = 493 - SYS_PERSONA = 494 - SYS_WORK_INTERVAL_CTL = 499 - SYS_GETENTROPY = 500 - SYS_NECP_OPEN = 501 - SYS_NECP_CLIENT_ACTION = 502 - SYS___NEXUS_OPEN = 503 - SYS___NEXUS_REGISTER = 504 - SYS___NEXUS_DEREGISTER = 505 - SYS___NEXUS_CREATE = 506 - SYS___NEXUS_DESTROY = 507 - SYS___NEXUS_GET_OPT = 508 - SYS___NEXUS_SET_OPT = 509 - SYS___CHANNEL_OPEN = 510 - SYS___CHANNEL_GET_INFO = 511 - SYS___CHANNEL_SYNC = 512 - SYS___CHANNEL_GET_OPT = 513 - SYS___CHANNEL_SET_OPT = 514 - SYS_ULOCK_WAIT = 515 - SYS_ULOCK_WAKE = 516 - 
SYS_FCLONEFILEAT = 517 - SYS_FS_SNAPSHOT = 518 - SYS_TERMINATE_WITH_PAYLOAD = 520 - SYS_ABORT_WITH_PAYLOAD = 521 - SYS_MAXSYSCALL = 522 - SYS_INVALID = 63 + SYS_MAXSYSCALL = 490 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index b64a8122ce4..262a84536a7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -1,5 +1,5 @@ // mksysnum_freebsd.pl -// Code generated by the command above; see README.md. DO NOT EDIT. +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT // +build 386,freebsd @@ -7,347 +7,345 @@ package unix const ( // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int - SYS_EXIT = 1 // { void sys_exit(int rval); } exit \ - SYS_FORK = 2 // { int fork(void); } - SYS_READ = 3 // { ssize_t read(int fd, void *buf, \ - SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \ - SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } - SYS_CLOSE = 6 // { int close(int fd); } - SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \ - SYS_LINK = 9 // { int link(char *path, char *link); } - SYS_UNLINK = 10 // { int unlink(char *path); } - SYS_CHDIR = 12 // { int chdir(char *path); } - SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } - SYS_CHMOD = 15 // { int chmod(char *path, int mode); } - SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break \ - SYS_GETPID = 20 // { pid_t getpid(void); } - SYS_MOUNT = 21 // { int mount(char *type, char *path, \ - SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } - SYS_SETUID = 23 // { int setuid(uid_t uid); } - SYS_GETUID = 24 // { uid_t getuid(void); } - SYS_GETEUID = 25 // { uid_t geteuid(void); } - SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \ - SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \ - SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \ - SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \ - SYS_ACCEPT = 30 // { int accept(int s, \ - SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \ - SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \ - SYS_ACCESS = 33 // { int access(char *path, int amode); } - SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } - SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } - SYS_SYNC = 36 // { int sync(void); } - SYS_KILL = 37 // { int kill(int pid, int signum); } - SYS_GETPPID = 39 // { pid_t getppid(void); } - SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } - SYS_GETEGID = 43 // { gid_t getegid(void); } - SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ - SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \ - SYS_GETGID = 47 // { gid_t getgid(void); } - SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \ - SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } - SYS_ACCT = 51 // { int acct(char *path); } - SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \ - SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \ - SYS_REBOOT = 55 // { int reboot(int opt); } - SYS_REVOKE = 56 // { int revoke(char *path); } - SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } - SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \ - SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \ - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \ - SYS_CHROOT = 61 // { 
int chroot(char *path); } - SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \ - SYS_VFORK = 66 // { int vfork(void); } - SYS_SBRK = 69 // { int sbrk(int incr); } - SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \ - SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \ - SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \ - SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ - SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \ - SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \ - SYS_GETPGRP = 81 // { int getpgrp(void); } - SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } - SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \ - SYS_SWAPON = 85 // { int swapon(char *name); } - SYS_GETITIMER = 86 // { int getitimer(u_int which, \ - SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } - SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } - SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } - SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \ - SYS_FSYNC = 95 // { int fsync(int fd); } - SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \ - SYS_SOCKET = 97 // { int socket(int domain, int type, \ - SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \ - SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } - SYS_BIND = 104 // { int bind(int s, caddr_t name, \ - SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ - SYS_LISTEN = 106 // { int listen(int s, int backlog); } - SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ - SYS_GETRUSAGE = 117 // { int getrusage(int who, \ - SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ - SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \ - SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ - SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ - SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } - SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } - SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } - SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } - SYS_RENAME = 128 // { int rename(char *from, char *to); } - SYS_FLOCK = 131 // { int flock(int fd, int how); } - SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } - SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ - SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } - SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \ - SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } - SYS_RMDIR = 137 // { int rmdir(char *path); } - SYS_UTIMES = 138 // { int utimes(char *path, \ - SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ - SYS_SETSID = 147 // { int setsid(void); } - SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ - SYS_LGETFH = 160 // { int lgetfh(char *fname, \ - SYS_GETFH = 161 // { int getfh(char *fname, \ - SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } - SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ - SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, \ - SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, \ - SYS_SETFIB = 175 // { int setfib(int fibnum); } - SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } - SYS_SETGID = 181 // { int setgid(gid_t gid); } - SYS_SETEGID = 182 // { int setegid(gid_t 
egid); } - SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } - SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } - SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } - SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ - SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ - SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, \ - SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, \ - SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, \ - SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, \ - SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ - SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } - SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } - SYS_UNDELETE = 205 // { int undelete(char *path); } - SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } - SYS_GETPGID = 207 // { int getpgid(pid_t pid); } - SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ - SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ - SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ - SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ - SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ - SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } - SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ - SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ - SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } - SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ - SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ - SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } - SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ - SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \ - SYS_ISSETUGID = 253 // { int issetugid(void); } - SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ - SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } - SYS_LUTIMES = 276 // { int lutimes(char *path, \ - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } - SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \ - SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ - SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ - SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 301 // { int modstat(int modid, \ - SYS_MODFNEXT = 302 // { int modfnext(int modid); } - SYS_MODFIND = 303 // { int modfind(const char *name); } - SYS_KLDLOAD = 304 // { int kldload(const char *file); } - SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } - 
SYS_KLDFIND = 306 // { int kldfind(const char *file); } - SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ - SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } - SYS_GETSID = 310 // { int getsid(pid_t pid); } - SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \ - SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \ - SYS_YIELD = 321 // { int yield(void); } - SYS_MLOCKALL = 324 // { int mlockall(int how); } - SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } - SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ - SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ - SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ - SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } - SYS_SCHED_YIELD = 331 // { int sched_yield (void); } - SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } - SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } - SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ - SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } - SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ - SYS_JAIL = 338 // { int jail(struct jail *jail); } - SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \ - SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } - SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } - SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ - SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ - SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ - SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ - SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ - SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ - SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ - SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ - SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ - SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \ - SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ - SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ - SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ - SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ - SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, \ - SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ - SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ - SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ - SYS___SETUGID = 374 // { int __setugid(int flag); } - SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } - SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ - SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } - SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } - SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ - SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ - SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ - SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char 
*path_p, \ - SYS_KENV = 390 // { int kenv(int what, const char *name, \ - SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ - SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ - SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ - SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ - SYS_STATFS = 396 // { int statfs(char *path, \ - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ - SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ - SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ - SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ - SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ - SYS_SIGACTION = 416 // { int sigaction(int sig, \ - SYS_SIGRETURN = 417 // { int sigreturn( \ - SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( \ - SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ - SYS_SWAPOFF = 424 // { int swapoff(const char *name); } - SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ - SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ - SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ - SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ - SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ - SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ - SYS_THR_EXIT = 431 // { void thr_exit(long *state); } - SYS_THR_SELF = 432 // { int thr_self(long *id); } - SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } - SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } - SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } - SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } - SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ - SYS_THR_SUSPEND = 442 // { int thr_suspend( \ - SYS_THR_WAKE = 443 // { int thr_wake(long id); } - SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } - SYS_AUDIT = 445 // { int audit(const void *record, \ - SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ - SYS_GETAUID = 447 // { int getauid(uid_t *auid); } - SYS_SETAUID = 448 // { int setauid(uid_t *auid); } - SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } - SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ - SYS_AUDITCTL = 453 // { int auditctl(char *path); } - SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ - SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ - SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } - SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } - SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } - SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ - SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ - 
SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ - SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ - SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ - SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } - SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } - SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } - SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ - SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } - SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } - SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ - SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ - SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ - SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ - SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ - SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ - SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ - SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ - SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ - SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ - SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } - SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ - SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ - SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ - SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ - SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } - SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } - SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ - SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ - SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } - SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } - SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } - SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, \ - SYS_CAP_ENTER = 516 // { int cap_enter(void); } - SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } - SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } - SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } - SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } - SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ - SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ - SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } - SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ - SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ - SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ - SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ - SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ - SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ - SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ - SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ - SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, \ - SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, \ - 
SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, \ - SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, \ - SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, \ - SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \ - SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \ - SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \ - SYS_ACCEPT4 = 541 // { int accept4(int s, \ - SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } - SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \ - SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \ - SYS_FUTIMENS = 546 // { int futimens(int fd, \ - SYS_UTIMENSAT = 547 // { int utimensat(int fd, \ + SYS_EXIT = 1 // { void sys_exit(int rval); } exit \ + SYS_FORK = 2 // { int fork(void); } + SYS_READ = 3 // { ssize_t read(int fd, void *buf, \ + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \ + SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } + SYS_CLOSE = 6 // { int close(int fd); } + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \ + SYS_LINK = 9 // { int link(char *path, char *link); } + SYS_UNLINK = 10 // { int unlink(char *path); } + SYS_CHDIR = 12 // { int chdir(char *path); } + SYS_FCHDIR = 13 // { int fchdir(int fd); } + SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } + SYS_CHMOD = 15 // { int chmod(char *path, int mode); } + SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } + SYS_OBREAK = 17 // { int obreak(char *nsize); } break \ + SYS_GETPID = 20 // { pid_t getpid(void); } + SYS_MOUNT = 21 // { int mount(char *type, char *path, \ + SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } + SYS_SETUID = 23 // { int setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t getuid(void); } + SYS_GETEUID = 25 // { uid_t geteuid(void); } + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \ + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \ + SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \ + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \ + SYS_ACCEPT = 30 // { int accept(int s, \ + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \ + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \ + SYS_ACCESS = 33 // { int access(char *path, int amode); } + SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { int sync(void); } + SYS_KILL = 37 // { int kill(int pid, int signum); } + SYS_GETPPID = 39 // { pid_t getppid(void); } + SYS_DUP = 41 // { int dup(u_int fd); } + SYS_PIPE = 42 // { int pipe(void); } + SYS_GETEGID = 43 // { gid_t getegid(void); } + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \ + SYS_GETGID = 47 // { gid_t getgid(void); } + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \ + SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } + SYS_ACCT = 51 // { int acct(char *path); } + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \ + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \ + SYS_REBOOT = 55 // { int reboot(int opt); } + SYS_REVOKE = 56 // { int revoke(char *path); } + SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } + SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \ + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \ + SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \ + SYS_CHROOT = 61 // { int 
chroot(char *path); } + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \ + SYS_VFORK = 66 // { int vfork(void); } + SYS_SBRK = 69 // { int sbrk(int incr); } + SYS_SSTK = 70 // { int sstk(int incr); } + SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \ + SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \ + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \ + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \ + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \ + SYS_GETPGRP = 81 // { int getpgrp(void); } + SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \ + SYS_SWAPON = 85 // { int swapon(char *name); } + SYS_GETITIMER = 86 // { int getitimer(u_int which, \ + SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } + SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } + SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } + SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \ + SYS_FSYNC = 95 // { int fsync(int fd); } + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \ + SYS_SOCKET = 97 // { int socket(int domain, int type, \ + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \ + SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } + SYS_BIND = 104 // { int bind(int s, caddr_t name, \ + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ + SYS_LISTEN = 106 // { int listen(int s, int backlog); } + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ + SYS_GETRUSAGE = 117 // { int getrusage(int who, \ + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \ + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ + SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } + SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } + SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } + SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } + SYS_RENAME = 128 // { int rename(char *from, char *to); } + SYS_FLOCK = 131 // { int flock(int fd, int how); } + SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } + SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ + SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \ + SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } + SYS_RMDIR = 137 // { int rmdir(char *path); } + SYS_UTIMES = 138 // { int utimes(char *path, \ + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ + SYS_SETSID = 147 // { int setsid(void); } + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ + SYS_LGETFH = 160 // { int lgetfh(char *fname, \ + SYS_GETFH = 161 // { int getfh(char *fname, \ + SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ + SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, \ + SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, \ + SYS_SETFIB = 175 // { int setfib(int fibnum); } + SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } + SYS_SETGID = 181 // { int setgid(gid_t gid); } + SYS_SETEGID = 182 // { int setegid(gid_t egid); } 
+ SYS_SETEUID = 183 // { int seteuid(uid_t euid); } + SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } + SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } + SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } + SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } + SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ + SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ + SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, \ + SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, \ + SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, \ + SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, \ + SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ + SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } + SYS_UNDELETE = 205 // { int undelete(char *path); } + SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } + SYS_GETPGID = 207 // { int getpgid(pid_t pid); } + SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ + SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ + SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ + SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ + SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ + SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } + SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ + SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ + SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } + SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ + SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ + SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } + SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ + SYS_RFORK = 251 // { int rfork(int flags); } + SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \ + SYS_ISSETUGID = 253 // { int issetugid(void); } + SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } + SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ + SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } + SYS_LUTIMES = 276 // { int lutimes(char *path, \ + SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } + SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } + SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } + SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \ + SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ + SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ + SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ + SYS_MODNEXT = 300 // { int modnext(int modid); } + SYS_MODSTAT = 301 // { int modstat(int modid, \ + SYS_MODFNEXT = 302 // { int modfnext(int modid); } + SYS_MODFIND = 303 // { int modfind(const char *name); } + SYS_KLDLOAD = 304 // { int kldload(const char *file); } + SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } + SYS_KLDFIND = 
306 // { int kldfind(const char *file); } + SYS_KLDNEXT = 307 // { int kldnext(int fileid); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ + SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } + SYS_GETSID = 310 // { int getsid(pid_t pid); } + SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \ + SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \ + SYS_YIELD = 321 // { int yield(void); } + SYS_MLOCKALL = 324 // { int mlockall(int how); } + SYS_MUNLOCKALL = 325 // { int munlockall(void); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ + SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ + SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ + SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } + SYS_SCHED_YIELD = 331 // { int sched_yield (void); } + SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } + SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } + SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ + SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } + SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ + SYS_JAIL = 338 // { int jail(struct jail *jail); } + SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \ + SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } + SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } + SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ + SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \ + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ + SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ + SYS_KQUEUE = 362 // { int kqueue(void); } + SYS_KEVENT = 363 // { int kevent(int fd, \ + SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ + SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ + SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ + SYS___SETUGID = 374 // { int __setugid(int flag); } + SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } + SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ + SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } + SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } + SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ + SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ + SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ + SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ + 
SYS_KENV = 390 // { int kenv(int what, const char *name, \ + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ + SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ + SYS_STATFS = 396 // { int statfs(char *path, \ + SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ + SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ + SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ + SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ + SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ + SYS_SIGACTION = 416 // { int sigaction(int sig, \ + SYS_SIGRETURN = 417 // { int sigreturn( \ + SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext( \ + SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ + SYS_SWAPOFF = 424 // { int swapoff(const char *name); } + SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ + SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ + SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ + SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ + SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ + SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ + SYS_THR_EXIT = 431 // { void thr_exit(long *state); } + SYS_THR_SELF = 432 // { int thr_self(long *id); } + SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } + SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } + SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } + SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } + SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ + SYS_THR_SUSPEND = 442 // { int thr_suspend( \ + SYS_THR_WAKE = 443 // { int thr_wake(long id); } + SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } + SYS_AUDIT = 445 // { int audit(const void *record, \ + SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ + SYS_GETAUID = 447 // { int getauid(uid_t *auid); } + SYS_SETAUID = 448 // { int setauid(uid_t *auid); } + SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } + SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ + SYS_AUDITCTL = 453 // { int auditctl(char *path); } + SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ + SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ + SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } + SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } + SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } + SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ + SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } + 
SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \ + SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \ + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \ + SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ + SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ + SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ + SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ + SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } + SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } + SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } + SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ + SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } + SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } + SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ + SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ + SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ + SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ + SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ + SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ + SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ + SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ + SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ + SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ + SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ + SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } + SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } + SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ + SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ + SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ + SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ + SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ + SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } + SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } + SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ + SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ + SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } + SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } + SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } + SYS_CAP_NEW = 514 // { int cap_new(int fd, uint64_t rights); } + SYS_CAP_GETRIGHTS = 515 // { int cap_getrights(int fd, \ + SYS_CAP_ENTER = 516 // { int cap_enter(void); } + SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } + SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } + SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } + SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } + SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ + SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ + SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } + SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ + SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ + SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ + SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void 
*inbufp, \
+ SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \
+ SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \
+ SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \
+ SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \
+ SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \
+ SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \
+ SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \
+ SYS_ACCEPT4 = 541 // { int accept4(int s, \
+ SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); }
+ SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \
+ SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
index 81722ac9f3b..57a60ea126d 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
@@ -1,5 +1,5 @@
 // mksysnum_freebsd.pl
-// Code generated by the command above; see README.md. DO NOT EDIT.
+// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT

 // +build amd64,freebsd

@@ -7,347 +7,345 @@ package unix

 const (
 // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int
- SYS_EXIT = 1 // { void sys_exit(int rval); } exit \
- SYS_FORK = 2 // { int fork(void); }
- SYS_READ = 3 // { ssize_t read(int fd, void *buf, \
- SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \
- SYS_OPEN = 5 // { int open(char *path, int flags, int mode); }
- SYS_CLOSE = 6 // { int close(int fd); }
- SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \
- SYS_LINK = 9 // { int link(char *path, char *link); }
- SYS_UNLINK = 10 // { int unlink(char *path); }
- SYS_CHDIR = 12 // { int chdir(char *path); }
- SYS_FCHDIR = 13 // { int fchdir(int fd); }
- SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); }
- SYS_CHMOD = 15 // { int chmod(char *path, int mode); }
- SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); }
- SYS_OBREAK = 17 // { int obreak(char *nsize); } break \
- SYS_GETPID = 20 // { pid_t getpid(void); }
- SYS_MOUNT = 21 // { int mount(char *type, char *path, \
- SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); }
- SYS_SETUID = 23 // { int setuid(uid_t uid); }
- SYS_GETUID = 24 // { uid_t getuid(void); }
- SYS_GETEUID = 25 // { uid_t geteuid(void); }
- SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \
- SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \
- SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \
- SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \
- SYS_ACCEPT = 30 // { int accept(int s, \
- SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \
- SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \
- SYS_ACCESS = 33 // { int access(char *path, int amode); }
- SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); }
- SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); }
- SYS_SYNC = 36 // { int sync(void); }
- SYS_KILL = 37 // { int kill(int pid, int signum); }
- SYS_GETPPID = 39 // { pid_t getppid(void); }
- SYS_DUP = 41 // { int dup(u_int fd); }
- SYS_PIPE = 42 // { int pipe(void); }
- SYS_GETEGID = 43 // { gid_t getegid(void); }
- SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \
- SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \
- SYS_GETGID = 47 // { gid_t getgid(void); }
- SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \
- SYS_SETLOGIN = 50 // { int 
setlogin(char *namebuf); } - SYS_ACCT = 51 // { int acct(char *path); } - SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \ - SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \ - SYS_REBOOT = 55 // { int reboot(int opt); } - SYS_REVOKE = 56 // { int revoke(char *path); } - SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } - SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \ - SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \ - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \ - SYS_CHROOT = 61 // { int chroot(char *path); } - SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \ - SYS_VFORK = 66 // { int vfork(void); } - SYS_SBRK = 69 // { int sbrk(int incr); } - SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \ - SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \ - SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \ - SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ - SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \ - SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \ - SYS_GETPGRP = 81 // { int getpgrp(void); } - SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } - SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \ - SYS_SWAPON = 85 // { int swapon(char *name); } - SYS_GETITIMER = 86 // { int getitimer(u_int which, \ - SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } - SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } - SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } - SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \ - SYS_FSYNC = 95 // { int fsync(int fd); } - SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \ - SYS_SOCKET = 97 // { int socket(int domain, int type, \ - SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \ - SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } - SYS_BIND = 104 // { int bind(int s, caddr_t name, \ - SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ - SYS_LISTEN = 106 // { int listen(int s, int backlog); } - SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ - SYS_GETRUSAGE = 117 // { int getrusage(int who, \ - SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ - SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \ - SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ - SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ - SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } - SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } - SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } - SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } - SYS_RENAME = 128 // { int rename(char *from, char *to); } - SYS_FLOCK = 131 // { int flock(int fd, int how); } - SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } - SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ - SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } - SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \ - SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } - SYS_RMDIR = 137 // { int rmdir(char *path); } - SYS_UTIMES = 138 // { int utimes(char *path, \ - SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ - SYS_SETSID = 147 // { int setsid(void); } - SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ - SYS_LGETFH = 
160 // { int lgetfh(char *fname, \ - SYS_GETFH = 161 // { int getfh(char *fname, \ - SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } - SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ - SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, \ - SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, \ - SYS_SETFIB = 175 // { int setfib(int fibnum); } - SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } - SYS_SETGID = 181 // { int setgid(gid_t gid); } - SYS_SETEGID = 182 // { int setegid(gid_t egid); } - SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } - SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } - SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } - SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ - SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ - SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, \ - SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, \ - SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, \ - SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, \ - SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ - SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } - SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } - SYS_UNDELETE = 205 // { int undelete(char *path); } - SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } - SYS_GETPGID = 207 // { int getpgid(pid_t pid); } - SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ - SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ - SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ - SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ - SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ - SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } - SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ - SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ - SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } - SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ - SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ - SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } - SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ - SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \ - SYS_ISSETUGID = 253 // { int issetugid(void); } - SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ - SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } - SYS_LUTIMES = 276 // { int lutimes(char *path, \ - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } - SYS_PREADV = 289 // { ssize_t preadv(int fd, 
struct iovec *iovp, \ - SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ - SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ - SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 301 // { int modstat(int modid, \ - SYS_MODFNEXT = 302 // { int modfnext(int modid); } - SYS_MODFIND = 303 // { int modfind(const char *name); } - SYS_KLDLOAD = 304 // { int kldload(const char *file); } - SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } - SYS_KLDFIND = 306 // { int kldfind(const char *file); } - SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ - SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } - SYS_GETSID = 310 // { int getsid(pid_t pid); } - SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \ - SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \ - SYS_YIELD = 321 // { int yield(void); } - SYS_MLOCKALL = 324 // { int mlockall(int how); } - SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } - SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ - SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ - SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ - SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } - SYS_SCHED_YIELD = 331 // { int sched_yield (void); } - SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } - SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } - SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ - SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } - SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ - SYS_JAIL = 338 // { int jail(struct jail *jail); } - SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \ - SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } - SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } - SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ - SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ - SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ - SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ - SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ - SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ - SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ - SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ - SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ - SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \ - SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ - SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ - SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ - SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ - SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, \ - SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ - SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ - SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ - SYS___SETUGID = 374 
// { int __setugid(int flag); } - SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } - SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ - SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } - SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } - SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ - SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ - SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ - SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ - SYS_KENV = 390 // { int kenv(int what, const char *name, \ - SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ - SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ - SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ - SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ - SYS_STATFS = 396 // { int statfs(char *path, \ - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ - SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ - SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ - SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ - SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ - SYS_SIGACTION = 416 // { int sigaction(int sig, \ - SYS_SIGRETURN = 417 // { int sigreturn( \ - SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( \ - SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ - SYS_SWAPOFF = 424 // { int swapoff(const char *name); } - SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ - SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ - SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ - SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ - SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ - SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ - SYS_THR_EXIT = 431 // { void thr_exit(long *state); } - SYS_THR_SELF = 432 // { int thr_self(long *id); } - SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } - SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } - SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } - SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } - SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ - SYS_THR_SUSPEND = 442 // { int thr_suspend( \ - SYS_THR_WAKE = 443 // { int thr_wake(long id); } - SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } - SYS_AUDIT = 445 // { int audit(const void *record, \ - SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ - SYS_GETAUID = 447 // { int getauid(uid_t *auid); } - SYS_SETAUID = 448 // { int setauid(uid_t *auid); } - SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } - SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ - SYS_SETAUDIT_ADDR = 452 // { int 
setaudit_addr( \ - SYS_AUDITCTL = 453 // { int auditctl(char *path); } - SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ - SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ - SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } - SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } - SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } - SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ - SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ - SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ - SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ - SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ - SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } - SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } - SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } - SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ - SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } - SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } - SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ - SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ - SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ - SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ - SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ - SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ - SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ - SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ - SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ - SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ - SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } - SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ - SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ - SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ - SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ - SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } - SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } - SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ - SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ - SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } - SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } - SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } - SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, \ - SYS_CAP_ENTER = 516 // { int cap_enter(void); } - SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } - SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } - SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } - SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } - SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ - SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ - SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } - SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ - SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ - 
SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ - SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ - SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ - SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ - SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ - SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ - SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, \ - SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, \ - SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, \ - SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, \ - SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, \ - SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \ - SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \ - SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \ - SYS_ACCEPT4 = 541 // { int accept4(int s, \ - SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } - SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \ - SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \ - SYS_FUTIMENS = 546 // { int futimens(int fd, \ - SYS_UTIMENSAT = 547 // { int utimensat(int fd, \ + SYS_EXIT = 1 // { void sys_exit(int rval); } exit \ + SYS_FORK = 2 // { int fork(void); } + SYS_READ = 3 // { ssize_t read(int fd, void *buf, \ + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \ + SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } + SYS_CLOSE = 6 // { int close(int fd); } + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \ + SYS_LINK = 9 // { int link(char *path, char *link); } + SYS_UNLINK = 10 // { int unlink(char *path); } + SYS_CHDIR = 12 // { int chdir(char *path); } + SYS_FCHDIR = 13 // { int fchdir(int fd); } + SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } + SYS_CHMOD = 15 // { int chmod(char *path, int mode); } + SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } + SYS_OBREAK = 17 // { int obreak(char *nsize); } break \ + SYS_GETPID = 20 // { pid_t getpid(void); } + SYS_MOUNT = 21 // { int mount(char *type, char *path, \ + SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } + SYS_SETUID = 23 // { int setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t getuid(void); } + SYS_GETEUID = 25 // { uid_t geteuid(void); } + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \ + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \ + SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \ + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \ + SYS_ACCEPT = 30 // { int accept(int s, \ + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \ + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \ + SYS_ACCESS = 33 // { int access(char *path, int amode); } + SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { int sync(void); } + SYS_KILL = 37 // { int kill(int pid, int signum); } + SYS_GETPPID = 39 // { pid_t getppid(void); } + SYS_DUP = 41 // { int dup(u_int fd); } + SYS_PIPE = 42 // { int pipe(void); } + SYS_GETEGID = 43 // { gid_t getegid(void); } + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \ + SYS_GETGID = 47 // { gid_t getgid(void); } + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \ + SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); 
} + SYS_ACCT = 51 // { int acct(char *path); } + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \ + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \ + SYS_REBOOT = 55 // { int reboot(int opt); } + SYS_REVOKE = 56 // { int revoke(char *path); } + SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } + SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \ + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \ + SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \ + SYS_CHROOT = 61 // { int chroot(char *path); } + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \ + SYS_VFORK = 66 // { int vfork(void); } + SYS_SBRK = 69 // { int sbrk(int incr); } + SYS_SSTK = 70 // { int sstk(int incr); } + SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \ + SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \ + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \ + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \ + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \ + SYS_GETPGRP = 81 // { int getpgrp(void); } + SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \ + SYS_SWAPON = 85 // { int swapon(char *name); } + SYS_GETITIMER = 86 // { int getitimer(u_int which, \ + SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } + SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } + SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } + SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \ + SYS_FSYNC = 95 // { int fsync(int fd); } + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \ + SYS_SOCKET = 97 // { int socket(int domain, int type, \ + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \ + SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } + SYS_BIND = 104 // { int bind(int s, caddr_t name, \ + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ + SYS_LISTEN = 106 // { int listen(int s, int backlog); } + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ + SYS_GETRUSAGE = 117 // { int getrusage(int who, \ + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \ + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ + SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } + SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } + SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } + SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } + SYS_RENAME = 128 // { int rename(char *from, char *to); } + SYS_FLOCK = 131 // { int flock(int fd, int how); } + SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } + SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ + SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \ + SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } + SYS_RMDIR = 137 // { int rmdir(char *path); } + SYS_UTIMES = 138 // { int utimes(char *path, \ + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ + SYS_SETSID = 147 // { int setsid(void); } + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ + SYS_LGETFH = 160 // { int lgetfh(char 
*fname, \ + SYS_GETFH = 161 // { int getfh(char *fname, \ + SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ + SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, \ + SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, \ + SYS_SETFIB = 175 // { int setfib(int fibnum); } + SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } + SYS_SETGID = 181 // { int setgid(gid_t gid); } + SYS_SETEGID = 182 // { int setegid(gid_t egid); } + SYS_SETEUID = 183 // { int seteuid(uid_t euid); } + SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } + SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } + SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } + SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } + SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ + SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ + SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, \ + SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, \ + SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, \ + SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, \ + SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ + SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } + SYS_UNDELETE = 205 // { int undelete(char *path); } + SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } + SYS_GETPGID = 207 // { int getpgid(pid_t pid); } + SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ + SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ + SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ + SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ + SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ + SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } + SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ + SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ + SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } + SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ + SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ + SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } + SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ + SYS_RFORK = 251 // { int rfork(int flags); } + SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \ + SYS_ISSETUGID = 253 // { int issetugid(void); } + SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } + SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ + SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } + SYS_LUTIMES = 276 // { int lutimes(char *path, \ + SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } + SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } + SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } + SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \ + 
SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ + SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ + SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ + SYS_MODNEXT = 300 // { int modnext(int modid); } + SYS_MODSTAT = 301 // { int modstat(int modid, \ + SYS_MODFNEXT = 302 // { int modfnext(int modid); } + SYS_MODFIND = 303 // { int modfind(const char *name); } + SYS_KLDLOAD = 304 // { int kldload(const char *file); } + SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } + SYS_KLDFIND = 306 // { int kldfind(const char *file); } + SYS_KLDNEXT = 307 // { int kldnext(int fileid); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ + SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } + SYS_GETSID = 310 // { int getsid(pid_t pid); } + SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \ + SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \ + SYS_YIELD = 321 // { int yield(void); } + SYS_MLOCKALL = 324 // { int mlockall(int how); } + SYS_MUNLOCKALL = 325 // { int munlockall(void); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ + SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ + SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ + SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } + SYS_SCHED_YIELD = 331 // { int sched_yield (void); } + SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } + SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } + SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ + SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } + SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ + SYS_JAIL = 338 // { int jail(struct jail *jail); } + SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \ + SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } + SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } + SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ + SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \ + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ + SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ + SYS_KQUEUE = 362 // { int kqueue(void); } + SYS_KEVENT = 363 // { int kevent(int fd, \ + SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ + SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ + SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ + SYS___SETUGID = 374 // { int __setugid(int 
flag); } + SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } + SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ + SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } + SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } + SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ + SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ + SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ + SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ + SYS_KENV = 390 // { int kenv(int what, const char *name, \ + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ + SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ + SYS_STATFS = 396 // { int statfs(char *path, \ + SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ + SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ + SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ + SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ + SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ + SYS_SIGACTION = 416 // { int sigaction(int sig, \ + SYS_SIGRETURN = 417 // { int sigreturn( \ + SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext( \ + SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ + SYS_SWAPOFF = 424 // { int swapoff(const char *name); } + SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ + SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ + SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ + SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ + SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ + SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ + SYS_THR_EXIT = 431 // { void thr_exit(long *state); } + SYS_THR_SELF = 432 // { int thr_self(long *id); } + SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } + SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } + SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } + SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } + SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ + SYS_THR_SUSPEND = 442 // { int thr_suspend( \ + SYS_THR_WAKE = 443 // { int thr_wake(long id); } + SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } + SYS_AUDIT = 445 // { int audit(const void *record, \ + SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ + SYS_GETAUID = 447 // { int getauid(uid_t *auid); } + SYS_SETAUID = 448 // { int setauid(uid_t *auid); } + SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } + SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ + SYS_AUDITCTL = 
453 // { int auditctl(char *path); } + SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ + SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ + SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } + SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } + SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } + SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ + SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } + SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \ + SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \ + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \ + SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ + SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ + SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ + SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ + SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } + SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } + SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } + SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ + SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } + SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } + SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ + SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ + SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ + SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ + SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ + SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ + SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ + SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ + SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ + SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ + SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ + SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } + SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } + SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ + SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ + SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ + SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ + SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ + SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } + SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } + SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ + SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ + SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } + SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } + SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } + SYS_CAP_NEW = 514 // { int cap_new(int fd, uint64_t rights); } + SYS_CAP_GETRIGHTS = 515 // { int cap_getrights(int fd, \ + SYS_CAP_ENTER = 516 // { int cap_enter(void); } + SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } + SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } + SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } + 
SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } + SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ + SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ + SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } + SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ + SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ + SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ + SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ + SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ + SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ + SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ + SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ + SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \ + SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \ + SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \ + SYS_ACCEPT4 = 541 // { int accept4(int s, \ + SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } + SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \ + SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \ ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index 44883141816..206b9f612d4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -1,5 +1,5 @@ // mksysnum_freebsd.pl -// Code generated by the command above; see README.md. DO NOT EDIT. +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT // +build arm,freebsd @@ -7,347 +7,345 @@ package unix const ( // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int - SYS_EXIT = 1 // { void sys_exit(int rval); } exit \ - SYS_FORK = 2 // { int fork(void); } - SYS_READ = 3 // { ssize_t read(int fd, void *buf, \ - SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \ - SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } - SYS_CLOSE = 6 // { int close(int fd); } - SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \ - SYS_LINK = 9 // { int link(char *path, char *link); } - SYS_UNLINK = 10 // { int unlink(char *path); } - SYS_CHDIR = 12 // { int chdir(char *path); } - SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } - SYS_CHMOD = 15 // { int chmod(char *path, int mode); } - SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break \ - SYS_GETPID = 20 // { pid_t getpid(void); } - SYS_MOUNT = 21 // { int mount(char *type, char *path, \ - SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } - SYS_SETUID = 23 // { int setuid(uid_t uid); } - SYS_GETUID = 24 // { uid_t getuid(void); } - SYS_GETEUID = 25 // { uid_t geteuid(void); } - SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \ - SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \ - SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \ - SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \ - SYS_ACCEPT = 30 // { int accept(int s, \ - SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \ - SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \ - SYS_ACCESS = 33 // { int access(char *path, int amode); } - SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } - SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } - 
SYS_SYNC = 36 // { int sync(void); } - SYS_KILL = 37 // { int kill(int pid, int signum); } - SYS_GETPPID = 39 // { pid_t getppid(void); } - SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } - SYS_GETEGID = 43 // { gid_t getegid(void); } - SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ - SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \ - SYS_GETGID = 47 // { gid_t getgid(void); } - SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \ - SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } - SYS_ACCT = 51 // { int acct(char *path); } - SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \ - SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \ - SYS_REBOOT = 55 // { int reboot(int opt); } - SYS_REVOKE = 56 // { int revoke(char *path); } - SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } - SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \ - SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \ - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \ - SYS_CHROOT = 61 // { int chroot(char *path); } - SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \ - SYS_VFORK = 66 // { int vfork(void); } - SYS_SBRK = 69 // { int sbrk(int incr); } - SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \ - SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \ - SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \ - SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ - SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \ - SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \ - SYS_GETPGRP = 81 // { int getpgrp(void); } - SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } - SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \ - SYS_SWAPON = 85 // { int swapon(char *name); } - SYS_GETITIMER = 86 // { int getitimer(u_int which, \ - SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } - SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } - SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } - SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \ - SYS_FSYNC = 95 // { int fsync(int fd); } - SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \ - SYS_SOCKET = 97 // { int socket(int domain, int type, \ - SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \ - SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } - SYS_BIND = 104 // { int bind(int s, caddr_t name, \ - SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ - SYS_LISTEN = 106 // { int listen(int s, int backlog); } - SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ - SYS_GETRUSAGE = 117 // { int getrusage(int who, \ - SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ - SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \ - SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ - SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ - SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } - SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } - SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } - SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } - SYS_RENAME = 128 // { int rename(char *from, char *to); } - SYS_FLOCK = 131 // { int flock(int fd, int how); } - SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } - 
SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ - SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } - SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \ - SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } - SYS_RMDIR = 137 // { int rmdir(char *path); } - SYS_UTIMES = 138 // { int utimes(char *path, \ - SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ - SYS_SETSID = 147 // { int setsid(void); } - SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ - SYS_LGETFH = 160 // { int lgetfh(char *fname, \ - SYS_GETFH = 161 // { int getfh(char *fname, \ - SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } - SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ - SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, \ - SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, \ - SYS_SETFIB = 175 // { int setfib(int fibnum); } - SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } - SYS_SETGID = 181 // { int setgid(gid_t gid); } - SYS_SETEGID = 182 // { int setegid(gid_t egid); } - SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } - SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } - SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } - SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ - SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ - SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, \ - SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, \ - SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, \ - SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, \ - SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ - SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } - SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } - SYS_UNDELETE = 205 // { int undelete(char *path); } - SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } - SYS_GETPGID = 207 // { int getpgid(pid_t pid); } - SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ - SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ - SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ - SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ - SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ - SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } - SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ - SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ - SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } - SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ - SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ - SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } - SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ - SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, 
\ - SYS_ISSETUGID = 253 // { int issetugid(void); } - SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ - SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } - SYS_LUTIMES = 276 // { int lutimes(char *path, \ - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } - SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \ - SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ - SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ - SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 301 // { int modstat(int modid, \ - SYS_MODFNEXT = 302 // { int modfnext(int modid); } - SYS_MODFIND = 303 // { int modfind(const char *name); } - SYS_KLDLOAD = 304 // { int kldload(const char *file); } - SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } - SYS_KLDFIND = 306 // { int kldfind(const char *file); } - SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ - SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } - SYS_GETSID = 310 // { int getsid(pid_t pid); } - SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \ - SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \ - SYS_YIELD = 321 // { int yield(void); } - SYS_MLOCKALL = 324 // { int mlockall(int how); } - SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } - SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ - SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ - SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ - SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } - SYS_SCHED_YIELD = 331 // { int sched_yield (void); } - SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } - SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } - SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ - SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } - SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ - SYS_JAIL = 338 // { int jail(struct jail *jail); } - SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \ - SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } - SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } - SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ - SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ - SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ - SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ - SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ - SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ - SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ - SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ - SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ - SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \ - SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ - SYS_EXTATTR_GET_FILE = 357 // { ssize_t 
extattr_get_file( \ - SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ - SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ - SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ - SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, \ - SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ - SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ - SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ - SYS___SETUGID = 374 // { int __setugid(int flag); } - SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } - SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ - SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } - SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } - SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ - SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ - SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ - SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ - SYS_KENV = 390 // { int kenv(int what, const char *name, \ - SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ - SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ - SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ - SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ - SYS_STATFS = 396 // { int statfs(char *path, \ - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ - SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ - SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ - SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ - SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ - SYS_SIGACTION = 416 // { int sigaction(int sig, \ - SYS_SIGRETURN = 417 // { int sigreturn( \ - SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( \ - SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ - SYS_SWAPOFF = 424 // { int swapoff(const char *name); } - SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ - SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ - SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ - SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ - SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ - SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ - SYS_THR_EXIT = 431 // { void thr_exit(long *state); } - SYS_THR_SELF = 432 // { int thr_self(long *id); } - SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } - SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } - SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } - SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } - SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ - SYS_THR_SUSPEND = 442 // { int thr_suspend( \ - SYS_THR_WAKE = 443 // { int 
thr_wake(long id); } - SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } - SYS_AUDIT = 445 // { int audit(const void *record, \ - SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ - SYS_GETAUID = 447 // { int getauid(uid_t *auid); } - SYS_SETAUID = 448 // { int setauid(uid_t *auid); } - SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } - SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ - SYS_AUDITCTL = 453 // { int auditctl(char *path); } - SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ - SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ - SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } - SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } - SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } - SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ - SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ - SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ - SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ - SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ - SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } - SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } - SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } - SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ - SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } - SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } - SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ - SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ - SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ - SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ - SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ - SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ - SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ - SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ - SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ - SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ - SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } - SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ - SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ - SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ - SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ - SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } - SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } - SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ - SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ - SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } - SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } - SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } - SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, \ - SYS_CAP_ENTER = 516 // { int cap_enter(void); } - SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int 
*modep); } - SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } - SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } - SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } - SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ - SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ - SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } - SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ - SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ - SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ - SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ - SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ - SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ - SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ - SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ - SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, \ - SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, \ - SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, \ - SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, \ - SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, \ - SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \ - SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \ - SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \ - SYS_ACCEPT4 = 541 // { int accept4(int s, \ - SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } - SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \ - SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \ - SYS_FUTIMENS = 546 // { int futimens(int fd, \ - SYS_UTIMENSAT = 547 // { int utimensat(int fd, \ + SYS_EXIT = 1 // { void sys_exit(int rval); } exit \ + SYS_FORK = 2 // { int fork(void); } + SYS_READ = 3 // { ssize_t read(int fd, void *buf, \ + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \ + SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } + SYS_CLOSE = 6 // { int close(int fd); } + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \ + SYS_LINK = 9 // { int link(char *path, char *link); } + SYS_UNLINK = 10 // { int unlink(char *path); } + SYS_CHDIR = 12 // { int chdir(char *path); } + SYS_FCHDIR = 13 // { int fchdir(int fd); } + SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } + SYS_CHMOD = 15 // { int chmod(char *path, int mode); } + SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } + SYS_OBREAK = 17 // { int obreak(char *nsize); } break \ + SYS_GETPID = 20 // { pid_t getpid(void); } + SYS_MOUNT = 21 // { int mount(char *type, char *path, \ + SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } + SYS_SETUID = 23 // { int setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t getuid(void); } + SYS_GETEUID = 25 // { uid_t geteuid(void); } + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \ + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \ + SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \ + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \ + SYS_ACCEPT = 30 // { int accept(int s, \ + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \ + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \ + SYS_ACCESS = 33 // { int access(char *path, int amode); } + SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { 
int sync(void); } + SYS_KILL = 37 // { int kill(int pid, int signum); } + SYS_GETPPID = 39 // { pid_t getppid(void); } + SYS_DUP = 41 // { int dup(u_int fd); } + SYS_PIPE = 42 // { int pipe(void); } + SYS_GETEGID = 43 // { gid_t getegid(void); } + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \ + SYS_GETGID = 47 // { gid_t getgid(void); } + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \ + SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } + SYS_ACCT = 51 // { int acct(char *path); } + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \ + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \ + SYS_REBOOT = 55 // { int reboot(int opt); } + SYS_REVOKE = 56 // { int revoke(char *path); } + SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } + SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \ + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \ + SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \ + SYS_CHROOT = 61 // { int chroot(char *path); } + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \ + SYS_VFORK = 66 // { int vfork(void); } + SYS_SBRK = 69 // { int sbrk(int incr); } + SYS_SSTK = 70 // { int sstk(int incr); } + SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \ + SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \ + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \ + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \ + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \ + SYS_GETPGRP = 81 // { int getpgrp(void); } + SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \ + SYS_SWAPON = 85 // { int swapon(char *name); } + SYS_GETITIMER = 86 // { int getitimer(u_int which, \ + SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } + SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } + SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } + SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \ + SYS_FSYNC = 95 // { int fsync(int fd); } + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \ + SYS_SOCKET = 97 // { int socket(int domain, int type, \ + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \ + SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } + SYS_BIND = 104 // { int bind(int s, caddr_t name, \ + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ + SYS_LISTEN = 106 // { int listen(int s, int backlog); } + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ + SYS_GETRUSAGE = 117 // { int getrusage(int who, \ + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \ + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ + SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } + SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } + SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } + SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } + SYS_RENAME = 128 // { int rename(char *from, char *to); } + SYS_FLOCK = 131 // { int flock(int fd, int how); } + SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } + SYS_SENDTO = 133 // { int 
sendto(int s, caddr_t buf, size_t len, \ + SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \ + SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } + SYS_RMDIR = 137 // { int rmdir(char *path); } + SYS_UTIMES = 138 // { int utimes(char *path, \ + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ + SYS_SETSID = 147 // { int setsid(void); } + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ + SYS_LGETFH = 160 // { int lgetfh(char *fname, \ + SYS_GETFH = 161 // { int getfh(char *fname, \ + SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ + SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, \ + SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, \ + SYS_SETFIB = 175 // { int setfib(int fibnum); } + SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } + SYS_SETGID = 181 // { int setgid(gid_t gid); } + SYS_SETEGID = 182 // { int setegid(gid_t egid); } + SYS_SETEUID = 183 // { int seteuid(uid_t euid); } + SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } + SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } + SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } + SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } + SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ + SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ + SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, \ + SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, \ + SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, \ + SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, \ + SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ + SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } + SYS_UNDELETE = 205 // { int undelete(char *path); } + SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } + SYS_GETPGID = 207 // { int getpgid(pid_t pid); } + SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ + SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ + SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ + SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ + SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ + SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } + SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ + SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ + SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } + SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ + SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ + SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } + SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ + SYS_RFORK = 251 // { int rfork(int flags); } + SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \ + SYS_ISSETUGID = 253 // 
{ int issetugid(void); } + SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } + SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ + SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } + SYS_LUTIMES = 276 // { int lutimes(char *path, \ + SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } + SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } + SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } + SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \ + SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ + SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ + SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ + SYS_MODNEXT = 300 // { int modnext(int modid); } + SYS_MODSTAT = 301 // { int modstat(int modid, \ + SYS_MODFNEXT = 302 // { int modfnext(int modid); } + SYS_MODFIND = 303 // { int modfind(const char *name); } + SYS_KLDLOAD = 304 // { int kldload(const char *file); } + SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } + SYS_KLDFIND = 306 // { int kldfind(const char *file); } + SYS_KLDNEXT = 307 // { int kldnext(int fileid); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ + SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } + SYS_GETSID = 310 // { int getsid(pid_t pid); } + SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \ + SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \ + SYS_YIELD = 321 // { int yield(void); } + SYS_MLOCKALL = 324 // { int mlockall(int how); } + SYS_MUNLOCKALL = 325 // { int munlockall(void); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ + SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ + SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ + SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } + SYS_SCHED_YIELD = 331 // { int sched_yield (void); } + SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } + SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } + SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ + SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } + SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ + SYS_JAIL = 338 // { int jail(struct jail *jail); } + SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \ + SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } + SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } + SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ + SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \ + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ + 
SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ + SYS_KQUEUE = 362 // { int kqueue(void); } + SYS_KEVENT = 363 // { int kevent(int fd, \ + SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ + SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ + SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ + SYS___SETUGID = 374 // { int __setugid(int flag); } + SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } + SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ + SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } + SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } + SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ + SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ + SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ + SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ + SYS_KENV = 390 // { int kenv(int what, const char *name, \ + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ + SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ + SYS_STATFS = 396 // { int statfs(char *path, \ + SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ + SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ + SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ + SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ + SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ + SYS_SIGACTION = 416 // { int sigaction(int sig, \ + SYS_SIGRETURN = 417 // { int sigreturn( \ + SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext( \ + SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ + SYS_SWAPOFF = 424 // { int swapoff(const char *name); } + SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ + SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ + SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ + SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ + SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ + SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ + SYS_THR_EXIT = 431 // { void thr_exit(long *state); } + SYS_THR_SELF = 432 // { int thr_self(long *id); } + SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } + SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } + SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } + SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } + SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ + SYS_THR_SUSPEND = 442 // { int thr_suspend( \ + SYS_THR_WAKE = 443 // { int thr_wake(long id); } + 
SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } + SYS_AUDIT = 445 // { int audit(const void *record, \ + SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ + SYS_GETAUID = 447 // { int getauid(uid_t *auid); } + SYS_SETAUID = 448 // { int setauid(uid_t *auid); } + SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } + SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ + SYS_AUDITCTL = 453 // { int auditctl(char *path); } + SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ + SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ + SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } + SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } + SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } + SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ + SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } + SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \ + SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \ + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \ + SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ + SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ + SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ + SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ + SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } + SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } + SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } + SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ + SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } + SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } + SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ + SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ + SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ + SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ + SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ + SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ + SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ + SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ + SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ + SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ + SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ + SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } + SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } + SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ + SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ + SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ + SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ + SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ + SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } + SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } + SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ + SYS_JAIL_SET = 507 // { int jail_set(struct 
iovec *iovp, \ + SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } + SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } + SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } + SYS_CAP_NEW = 514 // { int cap_new(int fd, uint64_t rights); } + SYS_CAP_GETRIGHTS = 515 // { int cap_getrights(int fd, \ + SYS_CAP_ENTER = 516 // { int cap_enter(void); } + SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } + SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } + SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } + SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } + SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ + SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ + SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } + SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ + SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ + SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ + SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ + SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ + SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ + SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ + SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ + SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \ + SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \ + SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \ + SYS_ACCEPT4 = 541 // { int accept4(int s, \ + SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } + SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \ + SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \ ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 95ab12903ec..cef4fed02c5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -385,6 +385,4 @@ const ( SYS_PKEY_MPROTECT = 380 SYS_PKEY_ALLOC = 381 SYS_PKEY_FREE = 382 - SYS_STATX = 383 - SYS_ARCH_PRCTL = 384 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index c5dabf2e451..49bfa1270a8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -338,5 +338,4 @@ const ( SYS_PKEY_MPROTECT = 329 SYS_PKEY_ALLOC = 330 SYS_PKEY_FREE = 331 - SYS_STATX = 332 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index ab7fa5fd393..97b182ef5b0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -358,5 +358,4 @@ const ( SYS_PKEY_MPROTECT = 394 SYS_PKEY_ALLOC = 395 SYS_PKEY_FREE = 396 - SYS_STATX = 397 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index b1c6b4bd3b3..640784357fe 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -282,5 +282,4 @@ const ( SYS_PKEY_MPROTECT = 288 SYS_PKEY_ALLOC = 289 SYS_PKEY_FREE = 290 - SYS_STATX = 291 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 2e9aa7a3e7e..939567c0997 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -371,5 +371,4 @@ const ( SYS_PKEY_MPROTECT = 4363 SYS_PKEY_ALLOC = 4364 SYS_PKEY_FREE = 4365 - SYS_STATX = 4366 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 92827635aae..09db959690a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -331,5 +331,4 @@ const ( SYS_PKEY_MPROTECT = 5323 SYS_PKEY_ALLOC = 5324 SYS_PKEY_FREE = 5325 - SYS_STATX = 5326 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 45bd3fd6c84..d1b872a09bd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -331,5 +331,4 @@ const ( SYS_PKEY_MPROTECT = 5323 SYS_PKEY_ALLOC = 5324 SYS_PKEY_FREE = 5325 - SYS_STATX = 5326 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 62ccac4b742..82ba20f28b4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -371,5 +371,4 @@ const ( SYS_PKEY_MPROTECT = 4363 SYS_PKEY_ALLOC = 4364 SYS_PKEY_FREE = 4365 - SYS_STATX = 4366 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index dfe5dab67ee..8944448aee2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -366,5 +366,4 @@ const ( SYS_PREADV2 = 380 SYS_PWRITEV2 = 381 SYS_KEXEC_FILE_LOAD = 382 - SYS_STATX = 383 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index eca97f738b7..90a039be4fd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -366,5 +366,4 @@ const ( SYS_PREADV2 = 380 SYS_PWRITEV2 = 381 SYS_KEXEC_FILE_LOAD = 382 - SYS_STATX = 383 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 8ea18e6c254..aab0cdb1838 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -306,8 +306,6 @@ const ( SYS_COPY_FILE_RANGE = 375 SYS_PREADV2 = 376 SYS_PWRITEV2 = 377 - SYS_S390_GUARDED_STORAGE = 378 - SYS_STATX = 379 SYS_SELECT = 142 SYS_GETRLIMIT = 191 SYS_LCHOWN = 198 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go index 8afda9c4512..f60d8f98823 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go @@ -134,7 +134,6 @@ const ( SYS_MINHERIT = 273 // { int|sys||minherit(void *addr, size_t len, int inherit); } SYS_LCHMOD = 274 // { int|sys||lchmod(const char *path, mode_t mode); } SYS_LCHOWN = 275 // { int|sys||lchown(const char *path, uid_t uid, gid_t gid); } - SYS_MSYNC = 277 // { int|sys|13|msync(void *addr, size_t len, int flags); } SYS___POSIX_CHOWN = 283 // { int|sys||__posix_chown(const char *path, uid_t uid, gid_t gid); } SYS___POSIX_FCHOWN = 284 // { int|sys||__posix_fchown(int fd, uid_t uid, gid_t gid); } SYS___POSIX_LCHOWN = 285 // { int|sys||__posix_lchown(const char *path, uid_t uid, gid_t gid); } 
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go index aea8dbec43c..48a91d46464 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go @@ -134,7 +134,6 @@ const ( SYS_MINHERIT = 273 // { int|sys||minherit(void *addr, size_t len, int inherit); } SYS_LCHMOD = 274 // { int|sys||lchmod(const char *path, mode_t mode); } SYS_LCHOWN = 275 // { int|sys||lchown(const char *path, uid_t uid, gid_t gid); } - SYS_MSYNC = 277 // { int|sys|13|msync(void *addr, size_t len, int flags); } SYS___POSIX_CHOWN = 283 // { int|sys||__posix_chown(const char *path, uid_t uid, gid_t gid); } SYS___POSIX_FCHOWN = 284 // { int|sys||__posix_fchown(int fd, uid_t uid, gid_t gid); } SYS___POSIX_LCHOWN = 285 // { int|sys||__posix_lchown(const char *path, uid_t uid, gid_t gid); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go index c6158a7ef92..612ba662cb2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go @@ -134,7 +134,6 @@ const ( SYS_MINHERIT = 273 // { int|sys||minherit(void *addr, size_t len, int inherit); } SYS_LCHMOD = 274 // { int|sys||lchmod(const char *path, mode_t mode); } SYS_LCHOWN = 275 // { int|sys||lchown(const char *path, uid_t uid, gid_t gid); } - SYS_MSYNC = 277 // { int|sys|13|msync(void *addr, size_t len, int flags); } SYS___POSIX_CHOWN = 283 // { int|sys||__posix_chown(const char *path, uid_t uid, gid_t gid); } SYS___POSIX_FCHOWN = 284 // { int|sys||__posix_fchown(int fd, uid_t uid, gid_t gid); } SYS___POSIX_LCHOWN = 285 // { int|sys||__posix_lchown(const char *path, uid_t uid, gid_t gid); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go deleted file mode 100644 index 32653e53c70..00000000000 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ /dev/null @@ -1,213 +0,0 @@ -// mksysnum_openbsd.pl -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT - -// +build arm,openbsd - -package unix - -const ( - SYS_EXIT = 1 // { void sys_exit(int rval); } - SYS_FORK = 2 // { int sys_fork(void); } - SYS_READ = 3 // { ssize_t sys_read(int fd, void *buf, size_t nbyte); } - SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, \ - SYS_OPEN = 5 // { int sys_open(const char *path, \ - SYS_CLOSE = 6 // { int sys_close(int fd); } - SYS_GETENTROPY = 7 // { int sys_getentropy(void *buf, size_t nbyte); } - SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, \ - SYS_LINK = 9 // { int sys_link(const char *path, const char *link); } - SYS_UNLINK = 10 // { int sys_unlink(const char *path); } - SYS_WAIT4 = 11 // { pid_t sys_wait4(pid_t pid, int *status, \ - SYS_CHDIR = 12 // { int sys_chdir(const char *path); } - SYS_FCHDIR = 13 // { int sys_fchdir(int fd); } - SYS_MKNOD = 14 // { int sys_mknod(const char *path, mode_t mode, \ - SYS_CHMOD = 15 // { int sys_chmod(const char *path, mode_t mode); } - SYS_CHOWN = 16 // { int sys_chown(const char *path, uid_t uid, \ - SYS_OBREAK = 17 // { int sys_obreak(char *nsize); } break - SYS_GETDTABLECOUNT = 18 // { int sys_getdtablecount(void); } - SYS_GETRUSAGE = 19 // { int sys_getrusage(int who, \ - SYS_GETPID = 20 // { pid_t sys_getpid(void); } - SYS_MOUNT = 21 // { int sys_mount(const char *type, const char *path, \ - SYS_UNMOUNT = 22 // { int sys_unmount(const char *path, int flags); } - SYS_SETUID = 23 // 
{ int sys_setuid(uid_t uid); } - SYS_GETUID = 24 // { uid_t sys_getuid(void); } - SYS_GETEUID = 25 // { uid_t sys_geteuid(void); } - SYS_PTRACE = 26 // { int sys_ptrace(int req, pid_t pid, caddr_t addr, \ - SYS_RECVMSG = 27 // { ssize_t sys_recvmsg(int s, struct msghdr *msg, \ - SYS_SENDMSG = 28 // { ssize_t sys_sendmsg(int s, \ - SYS_RECVFROM = 29 // { ssize_t sys_recvfrom(int s, void *buf, size_t len, \ - SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, \ - SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, \ - SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, \ - SYS_ACCESS = 33 // { int sys_access(const char *path, int amode); } - SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); } - SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); } - SYS_SYNC = 36 // { void sys_sync(void); } - SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); } - SYS_GETPPID = 39 // { pid_t sys_getppid(void); } - SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); } - SYS_DUP = 41 // { int sys_dup(int fd); } - SYS_FSTATAT = 42 // { int sys_fstatat(int fd, const char *path, \ - SYS_GETEGID = 43 // { gid_t sys_getegid(void); } - SYS_PROFIL = 44 // { int sys_profil(caddr_t samples, size_t size, \ - SYS_KTRACE = 45 // { int sys_ktrace(const char *fname, int ops, \ - SYS_SIGACTION = 46 // { int sys_sigaction(int signum, \ - SYS_GETGID = 47 // { gid_t sys_getgid(void); } - SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int how, sigset_t mask); } - SYS_GETLOGIN = 49 // { int sys_getlogin(char *namebuf, u_int namelen); } - SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); } - SYS_ACCT = 51 // { int sys_acct(const char *path); } - SYS_SIGPENDING = 52 // { int sys_sigpending(void); } - SYS_FSTAT = 53 // { int sys_fstat(int fd, struct stat *sb); } - SYS_IOCTL = 54 // { int sys_ioctl(int fd, \ - SYS_REBOOT = 55 // { int sys_reboot(int opt); } - SYS_REVOKE = 56 // { int sys_revoke(const char *path); } - SYS_SYMLINK = 57 // { int sys_symlink(const char *path, \ - SYS_READLINK = 58 // { ssize_t sys_readlink(const char *path, \ - SYS_EXECVE = 59 // { int sys_execve(const char *path, \ - SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); } - SYS_CHROOT = 61 // { int sys_chroot(const char *path); } - SYS_GETFSSTAT = 62 // { int sys_getfsstat(struct statfs *buf, size_t bufsize, \ - SYS_STATFS = 63 // { int sys_statfs(const char *path, \ - SYS_FSTATFS = 64 // { int sys_fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 65 // { int sys_fhstatfs(const fhandle_t *fhp, \ - SYS_VFORK = 66 // { int sys_vfork(void); } - SYS_GETTIMEOFDAY = 67 // { int sys_gettimeofday(struct timeval *tp, \ - SYS_SETTIMEOFDAY = 68 // { int sys_settimeofday(const struct timeval *tv, \ - SYS_SETITIMER = 69 // { int sys_setitimer(int which, \ - SYS_GETITIMER = 70 // { int sys_getitimer(int which, \ - SYS_SELECT = 71 // { int sys_select(int nd, fd_set *in, fd_set *ou, \ - SYS_KEVENT = 72 // { int sys_kevent(int fd, \ - SYS_MUNMAP = 73 // { int sys_munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int sys_mprotect(void *addr, size_t len, \ - SYS_MADVISE = 75 // { int sys_madvise(void *addr, size_t len, \ - SYS_UTIMES = 76 // { int sys_utimes(const char *path, \ - SYS_FUTIMES = 77 // { int sys_futimes(int fd, \ - SYS_MINCORE = 78 // { int sys_mincore(void *addr, size_t len, \ - SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, \ - SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, \ - 
SYS_GETPGRP = 81 // { int sys_getpgrp(void); } - SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, pid_t pgid); } - SYS_SENDSYSLOG = 83 // { int sys_sendsyslog(const void *buf, size_t nbyte); } - SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, \ - SYS_FUTIMENS = 85 // { int sys_futimens(int fd, \ - SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, \ - SYS_CLOCK_SETTIME = 88 // { int sys_clock_settime(clockid_t clock_id, \ - SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, \ - SYS_DUP2 = 90 // { int sys_dup2(int from, int to); } - SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, \ - SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... void *arg); } - SYS_ACCEPT4 = 93 // { int sys_accept4(int s, struct sockaddr *name, \ - SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, \ - SYS_FSYNC = 95 // { int sys_fsync(int fd); } - SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); } - SYS_SOCKET = 97 // { int sys_socket(int domain, int type, int protocol); } - SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, \ - SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); } - SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); } - SYS_PIPE2 = 101 // { int sys_pipe2(int *fdp, int flags); } - SYS_DUP3 = 102 // { int sys_dup3(int from, int to, int flags); } - SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); } - SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, \ - SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, \ - SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); } - SYS_CHFLAGSAT = 107 // { int sys_chflagsat(int fd, const char *path, \ - SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, \ - SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, \ - SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); } - SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, \ - SYS_READV = 120 // { ssize_t sys_readv(int fd, \ - SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, \ - SYS_KILL = 122 // { int sys_kill(int pid, int signum); } - SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); } - SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); } - SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); } - SYS_SETREGID = 127 // { int sys_setregid(gid_t rgid, gid_t egid); } - SYS_RENAME = 128 // { int sys_rename(const char *from, const char *to); } - SYS_FLOCK = 131 // { int sys_flock(int fd, int how); } - SYS_MKFIFO = 132 // { int sys_mkfifo(const char *path, mode_t mode); } - SYS_SENDTO = 133 // { ssize_t sys_sendto(int s, const void *buf, \ - SYS_SHUTDOWN = 134 // { int sys_shutdown(int s, int how); } - SYS_SOCKETPAIR = 135 // { int sys_socketpair(int domain, int type, \ - SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); } - SYS_RMDIR = 137 // { int sys_rmdir(const char *path); } - SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, \ - SYS_SETSID = 147 // { int sys_setsid(void); } - SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, \ - SYS_NFSSVC = 155 // { int sys_nfssvc(int flag, void *argp); } - SYS_GETFH = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); } - SYS_SYSARCH = 165 // { int sys_sysarch(int op, void *parms); } - SYS_PREAD = 173 // { ssize_t sys_pread(int fd, void *buf, \ - SYS_PWRITE = 174 // { ssize_t 
sys_pwrite(int fd, const void *buf, \ - SYS_SETGID = 181 // { int sys_setgid(gid_t gid); } - SYS_SETEGID = 182 // { int sys_setegid(gid_t egid); } - SYS_SETEUID = 183 // { int sys_seteuid(uid_t euid); } - SYS_PATHCONF = 191 // { long sys_pathconf(const char *path, int name); } - SYS_FPATHCONF = 192 // { long sys_fpathconf(int fd, int name); } - SYS_SWAPCTL = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); } - SYS_GETRLIMIT = 194 // { int sys_getrlimit(int which, \ - SYS_SETRLIMIT = 195 // { int sys_setrlimit(int which, \ - SYS_MMAP = 197 // { void *sys_mmap(void *addr, size_t len, int prot, \ - SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, \ - SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, \ - SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); } - SYS___SYSCTL = 202 // { int sys___sysctl(const int *name, u_int namelen, \ - SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); } - SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); } - SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); } - SYS_UTRACE = 209 // { int sys_utrace(const char *label, const void *addr, \ - SYS_SEMGET = 221 // { int sys_semget(key_t key, int nsems, int semflg); } - SYS_MSGGET = 225 // { int sys_msgget(key_t key, int msgflg); } - SYS_MSGSND = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, \ - SYS_MSGRCV = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, \ - SYS_SHMAT = 228 // { void *sys_shmat(int shmid, const void *shmaddr, \ - SYS_SHMDT = 230 // { int sys_shmdt(const void *shmaddr); } - SYS_MINHERIT = 250 // { int sys_minherit(void *addr, size_t len, \ - SYS_POLL = 252 // { int sys_poll(struct pollfd *fds, \ - SYS_ISSETUGID = 253 // { int sys_issetugid(void); } - SYS_LCHOWN = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); } - SYS_GETSID = 255 // { pid_t sys_getsid(pid_t pid); } - SYS_MSYNC = 256 // { int sys_msync(void *addr, size_t len, int flags); } - SYS_PIPE = 263 // { int sys_pipe(int *fdp); } - SYS_FHOPEN = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); } - SYS_PREADV = 267 // { ssize_t sys_preadv(int fd, \ - SYS_PWRITEV = 268 // { ssize_t sys_pwritev(int fd, \ - SYS_KQUEUE = 269 // { int sys_kqueue(void); } - SYS_MLOCKALL = 271 // { int sys_mlockall(int flags); } - SYS_MUNLOCKALL = 272 // { int sys_munlockall(void); } - SYS_GETRESUID = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, \ - SYS_SETRESUID = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, \ - SYS_GETRESGID = 283 // { int sys_getresgid(gid_t *rgid, gid_t *egid, \ - SYS_SETRESGID = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, \ - SYS_MQUERY = 286 // { void *sys_mquery(void *addr, size_t len, int prot, \ - SYS_CLOSEFROM = 287 // { int sys_closefrom(int fd); } - SYS_SIGALTSTACK = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, \ - SYS_SHMGET = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); } - SYS_SEMOP = 290 // { int sys_semop(int semid, struct sembuf *sops, \ - SYS_FHSTAT = 294 // { int sys_fhstat(const fhandle_t *fhp, \ - SYS___SEMCTL = 295 // { int sys___semctl(int semid, int semnum, int cmd, \ - SYS_SHMCTL = 296 // { int sys_shmctl(int shmid, int cmd, \ - SYS_MSGCTL = 297 // { int sys_msgctl(int msqid, int cmd, \ - SYS_SCHED_YIELD = 298 // { int sys_sched_yield(void); } - SYS_GETTHRID = 299 // { pid_t sys_getthrid(void); } - SYS___THRWAKEUP = 301 // { int sys___thrwakeup(const volatile void *ident, \ - SYS___THREXIT = 302 // { void 
sys___threxit(pid_t *notdead); } - SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, \ - SYS___GETCWD = 304 // { int sys___getcwd(char *buf, size_t len); } - SYS_ADJFREQ = 305 // { int sys_adjfreq(const int64_t *freq, \ - SYS_SETRTABLE = 310 // { int sys_setrtable(int rtableid); } - SYS_GETRTABLE = 311 // { int sys_getrtable(void); } - SYS_FACCESSAT = 313 // { int sys_faccessat(int fd, const char *path, \ - SYS_FCHMODAT = 314 // { int sys_fchmodat(int fd, const char *path, \ - SYS_FCHOWNAT = 315 // { int sys_fchownat(int fd, const char *path, \ - SYS_LINKAT = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, \ - SYS_MKDIRAT = 318 // { int sys_mkdirat(int fd, const char *path, \ - SYS_MKFIFOAT = 319 // { int sys_mkfifoat(int fd, const char *path, \ - SYS_MKNODAT = 320 // { int sys_mknodat(int fd, const char *path, \ - SYS_OPENAT = 321 // { int sys_openat(int fd, const char *path, int flags, \ - SYS_READLINKAT = 322 // { ssize_t sys_readlinkat(int fd, const char *path, \ - SYS_RENAMEAT = 323 // { int sys_renameat(int fromfd, const char *from, \ - SYS_SYMLINKAT = 324 // { int sys_symlinkat(const char *path, int fd, \ - SYS_UNLINKAT = 325 // { int sys_unlinkat(int fd, const char *path, \ - SYS___SET_TCB = 329 // { void sys___set_tcb(void *tcb); } - SYS___GET_TCB = 330 // { void *sys___get_tcb(void); } -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go index e61d78a54ff..2de1d44e281 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go @@ -1,7 +1,6 @@ -// cgo -godefs types_darwin.go | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. - // +build 386,darwin +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_darwin.go package unix @@ -446,17 +445,3 @@ type Termios struct { Ispeed uint32 Ospeed uint32 } - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -const ( - AT_FDCWD = -0x2 - AT_REMOVEDIR = 0x80 - AT_SYMLINK_FOLLOW = 0x40 - AT_SYMLINK_NOFOLLOW = 0x20 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 2619155ff8c..044657878c8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -1,7 +1,6 @@ -// cgo -godefs types_darwin.go | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- // +build amd64,darwin +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_darwin.go package unix @@ -457,16 +456,7 @@ type Termios struct { Ospeed uint64 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - const ( AT_FDCWD = -0x2 - AT_REMOVEDIR = 0x80 - AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go index 4dca0d4db26..66df363ce5b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go @@ -447,17 +447,3 @@ type Termios struct { Ispeed uint32 Ospeed uint32 } - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -const ( - AT_FDCWD = -0x2 - AT_REMOVEDIR = 0x80 - AT_SYMLINK_FOLLOW = 0x40 - AT_SYMLINK_NOFOLLOW = 0x20 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index f2881fd1427..85d56eabd3f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -455,17 +455,3 @@ type Termios struct { Ispeed uint64 Ospeed uint64 } - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -const ( - AT_FDCWD = -0x2 - AT_REMOVEDIR = 0x80 - AT_SYMLINK_FOLLOW = 0x40 - AT_SYMLINK_NOFOLLOW = 0x20 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index 67c6bf883cb..e585c893abc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -441,8 +441,3 @@ type Termios struct { Ispeed uint32 Ospeed uint32 } - -const ( - AT_FDCWD = 0xfffafdcd - AT_SYMLINK_NOFOLLOW = 0x1 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 5b28bcbbac9..8cf30947b41 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -1,7 +1,6 @@ -// cgo -godefs types_freebsd.go | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- // +build 386,freebsd +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go package unix @@ -86,7 +85,7 @@ type Stat_t struct { Ctimespec Timespec Size int64 Blocks int64 - Blksize int32 + Blksize uint32 Flags uint32 Gen uint32 Lspare int32 @@ -289,9 +288,9 @@ type FdSet struct { } const ( - sizeofIfMsghdr = 0xa8 + sizeofIfMsghdr = 0x64 SizeofIfMsghdr = 0x60 - sizeofIfData = 0x98 + sizeofIfData = 0x54 SizeofIfData = 0x50 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 @@ -323,31 +322,31 @@ type IfMsghdr struct { } type ifData struct { - Type uint8 - Physical uint8 - Addrlen uint8 - Hdrlen uint8 - Link_state uint8 - Vhid uint8 - Datalen uint16 - Mtu uint32 - Metric uint32 - Baudrate uint64 - Ipackets uint64 - Ierrors uint64 - Opackets uint64 - Oerrors uint64 - Collisions uint64 - Ibytes uint64 - Obytes uint64 - Imcasts uint64 - Omcasts uint64 - Iqdrops uint64 - Oqdrops uint64 - Noproto uint64 - Hwassist uint64 - X__ifi_epoch [8]byte - X__ifi_lastchange [16]byte + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Vhid uint8 + Baudrate_pf uint8 + Datalen uint8 + Mtu uint32 + Metric uint32 + Baudrate uint32 + Ipackets uint32 + Ierrors uint32 + Opackets uint32 + Oerrors uint32 + Collisions uint32 + Ibytes uint32 + Obytes uint32 + Imcasts uint32 + Omcasts uint32 + Iqdrops uint32 + Noproto uint32 + Hwassist uint64 + Epoch int32 + Lastchange Timeval } type IfData struct { @@ -501,21 +500,3 @@ type Termios struct { Ispeed uint32 Ospeed uint32 } - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -const ( - AT_FDCWD = -0x64 - AT_REMOVEDIR = 0x800 - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x200 -) - -type CapRights struct { - Rights [2]uint64 -} diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index c65d89e4972..e5feb207be6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -1,7 +1,6 @@ -// cgo -godefs types_freebsd.go | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- // +build amd64,freebsd +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go package unix @@ -86,7 +85,7 @@ type Stat_t struct { Ctimespec Timespec Size int64 Blocks int64 - Blksize int32 + Blksize uint32 Flags uint32 Gen uint32 Lspare int32 @@ -325,31 +324,31 @@ type IfMsghdr struct { } type ifData struct { - Type uint8 - Physical uint8 - Addrlen uint8 - Hdrlen uint8 - Link_state uint8 - Vhid uint8 - Datalen uint16 - Mtu uint32 - Metric uint32 - Baudrate uint64 - Ipackets uint64 - Ierrors uint64 - Opackets uint64 - Oerrors uint64 - Collisions uint64 - Ibytes uint64 - Obytes uint64 - Imcasts uint64 - Omcasts uint64 - Iqdrops uint64 - Oqdrops uint64 - Noproto uint64 - Hwassist uint64 - X__ifi_epoch [8]byte - X__ifi_lastchange [16]byte + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Vhid uint8 + Baudrate_pf uint8 + Datalen uint8 + Mtu uint64 + Metric uint64 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Hwassist uint64 + Epoch int64 + Lastchange Timeval } type IfData struct { @@ -504,21 +503,3 @@ type Termios struct { Ispeed uint32 Ospeed uint32 } - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -const ( - AT_FDCWD = -0x64 - AT_REMOVEDIR = 0x800 - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x200 -) - -type CapRights struct { - Rights [2]uint64 -} diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 42c0a502cf8..5472b54284b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -1,5 +1,5 @@ -// cgo -godefs -- -fsigned-char types_freebsd.go | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
+// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -fsigned-char types_freebsd.go // +build arm,freebsd @@ -88,7 +88,7 @@ type Stat_t struct { Ctimespec Timespec Size int64 Blocks int64 - Blksize int32 + Blksize uint32 Flags uint32 Gen uint32 Lspare int32 @@ -142,15 +142,6 @@ type Fsid struct { Val [2]int32 } -const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 -) - type RawSockaddrInet4 struct { Len uint8 Family uint8 @@ -291,9 +282,9 @@ type FdSet struct { } const ( - sizeofIfMsghdr = 0xa8 + sizeofIfMsghdr = 0x70 SizeofIfMsghdr = 0x70 - sizeofIfData = 0x98 + sizeofIfData = 0x60 SizeofIfData = 0x60 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 @@ -325,31 +316,31 @@ type IfMsghdr struct { } type ifData struct { - Type uint8 - Physical uint8 - Addrlen uint8 - Hdrlen uint8 - Link_state uint8 - Vhid uint8 - Datalen uint16 - Mtu uint32 - Metric uint32 - Baudrate uint64 - Ipackets uint64 - Ierrors uint64 - Opackets uint64 - Oerrors uint64 - Collisions uint64 - Ibytes uint64 - Obytes uint64 - Imcasts uint64 - Omcasts uint64 - Iqdrops uint64 - Oqdrops uint64 - Noproto uint64 - Hwassist uint64 - X__ifi_epoch [8]byte - X__ifi_lastchange [16]byte + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Vhid uint8 + Baudrate_pf uint8 + Datalen uint8 + Mtu uint32 + Metric uint32 + Baudrate uint32 + Ipackets uint32 + Ierrors uint32 + Opackets uint32 + Oerrors uint32 + Collisions uint32 + Ibytes uint32 + Obytes uint32 + Imcasts uint32 + Omcasts uint32 + Iqdrops uint32 + Noproto uint32 + Hwassist uint64 + Epoch int64 + Lastchange Timeval } type IfData struct { @@ -504,21 +495,3 @@ type Termios struct { Ispeed uint32 Ospeed uint32 } - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -const ( - AT_FDCWD = -0x64 - AT_REMOVEDIR = 0x800 - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x200 -) - -type CapRights struct { - Rights [2]uint64 -} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 8b30c69975e..20ae16bc21d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -285,13 +285,6 @@ type IPv6Mreq struct { Interface uint32 } -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -380,11 +373,9 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofLinger = 0x8 - SizeofIovec = 0x8 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 SizeofMsghdr = 0x1c SizeofCmsghdr = 0xc SizeofInet4Pktinfo = 0xc @@ -425,7 +416,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2c + IFLA_MAX = 0x2b RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -669,9 +660,7 @@ type Sigset_t struct { X__val [32]uint32 } -const RNDGETENTCNT = 0x80045200 - -const PERF_IOC_FLAG_GROUP = 0x1 +const _SC_PAGESIZE = 0x1e type Termios struct { Iflag uint32 @@ -683,111 +672,3 @@ type Termios struct { Ispeed uint32 Ospeed uint32 } - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -type Taskstats struct { - Version uint16 - Pad_cgo_0 [2]byte - Ac_exitcode uint32 - Ac_flag uint8 - Ac_nice uint8 - Pad_cgo_1 [6]byte - Cpu_count uint64 - Cpu_delay_total uint64 - Blkio_count uint64 - Blkio_delay_total uint64 - Swapin_count uint64 - 
Swapin_delay_total uint64 - Cpu_run_real_total uint64 - Cpu_run_virtual_total uint64 - Ac_comm [32]int8 - Ac_sched uint8 - Ac_pad [3]uint8 - Pad_cgo_2 [4]byte - Ac_uid uint32 - Ac_gid uint32 - Ac_pid uint32 - Ac_ppid uint32 - Ac_btime uint32 - Pad_cgo_3 [4]byte - Ac_etime uint64 - Ac_utime uint64 - Ac_stime uint64 - Ac_minflt uint64 - Ac_majflt uint64 - Coremem uint64 - Virtmem uint64 - Hiwater_rss uint64 - Hiwater_vm uint64 - Read_char uint64 - Write_char uint64 - Read_syscalls uint64 - Write_syscalls uint64 - Read_bytes uint64 - Write_bytes uint64 - Cancelled_write_bytes uint64 - Nvcsw uint64 - Nivcsw uint64 - Ac_utimescaled uint64 - Ac_stimescaled uint64 - Cpu_scaled_run_real_total uint64 - Freepages_count uint64 - Freepages_delay_total uint64 -} - -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index cf035898625..1878142033f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -287,13 +287,6 @@ type IPv6Mreq struct { Interface uint32 } -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -384,11 +377,9 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofLinger = 0x8 - SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 SizeofMsghdr = 0x38 SizeofCmsghdr = 0x10 SizeofInet4Pktinfo = 0xc @@ -429,7 +420,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2c + IFLA_MAX = 0x2b RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -687,9 +678,7 @@ type Sigset_t struct { X__val [16]uint64 } -const RNDGETENTCNT = 0x80045200 - -const PERF_IOC_FLAG_GROUP = 0x1 +const _SC_PAGESIZE = 0x1e type Termios struct { Iflag uint32 @@ -701,111 +690,3 @@ type Termios struct { Ispeed uint32 Ospeed uint32 } - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -type Taskstats struct { - Version uint16 - Pad_cgo_0 [2]byte - Ac_exitcode uint32 - Ac_flag uint8 - Ac_nice uint8 - Pad_cgo_1 [6]byte - Cpu_count uint64 - Cpu_delay_total uint64 - Blkio_count uint64 - Blkio_delay_total uint64 - Swapin_count uint64 - 
Swapin_delay_total uint64 - Cpu_run_real_total uint64 - Cpu_run_virtual_total uint64 - Ac_comm [32]int8 - Ac_sched uint8 - Ac_pad [3]uint8 - Pad_cgo_2 [4]byte - Ac_uid uint32 - Ac_gid uint32 - Ac_pid uint32 - Ac_ppid uint32 - Ac_btime uint32 - Pad_cgo_3 [4]byte - Ac_etime uint64 - Ac_utime uint64 - Ac_stime uint64 - Ac_minflt uint64 - Ac_majflt uint64 - Coremem uint64 - Virtmem uint64 - Hiwater_rss uint64 - Hiwater_vm uint64 - Read_char uint64 - Write_char uint64 - Read_syscalls uint64 - Write_syscalls uint64 - Read_bytes uint64 - Write_bytes uint64 - Cancelled_write_bytes uint64 - Nvcsw uint64 - Nivcsw uint64 - Ac_utimescaled uint64 - Ac_stimescaled uint64 - Cpu_scaled_run_real_total uint64 - Freepages_count uint64 - Freepages_delay_total uint64 -} - -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 8ef7d85f178..b7556684a60 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -289,13 +289,6 @@ type IPv6Mreq struct { Interface uint32 } -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -384,11 +377,9 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofLinger = 0x8 - SizeofIovec = 0x8 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 SizeofMsghdr = 0x1c SizeofCmsghdr = 0xc SizeofInet4Pktinfo = 0xc @@ -429,7 +420,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2c + IFLA_MAX = 0x2b RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -658,9 +649,7 @@ type Sigset_t struct { X__val [32]uint32 } -const RNDGETENTCNT = 0x80045200 - -const PERF_IOC_FLAG_GROUP = 0x1 +const _SC_PAGESIZE = 0x1e type Termios struct { Iflag uint32 @@ -672,111 +661,3 @@ type Termios struct { Ispeed uint32 Ospeed uint32 } - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -type Taskstats struct { - Version uint16 - Pad_cgo_0 [2]byte - Ac_exitcode uint32 - Ac_flag uint8 - Ac_nice uint8 - Pad_cgo_1 [6]byte - Cpu_count uint64 - Cpu_delay_total uint64 - Blkio_count uint64 - Blkio_delay_total uint64 - Swapin_count uint64 - Swapin_delay_total 
uint64 - Cpu_run_real_total uint64 - Cpu_run_virtual_total uint64 - Ac_comm [32]uint8 - Ac_sched uint8 - Ac_pad [3]uint8 - Pad_cgo_2 [4]byte - Ac_uid uint32 - Ac_gid uint32 - Ac_pid uint32 - Ac_ppid uint32 - Ac_btime uint32 - Pad_cgo_3 [4]byte - Ac_etime uint64 - Ac_utime uint64 - Ac_stime uint64 - Ac_minflt uint64 - Ac_majflt uint64 - Coremem uint64 - Virtmem uint64 - Hiwater_rss uint64 - Hiwater_vm uint64 - Read_char uint64 - Write_char uint64 - Read_syscalls uint64 - Write_syscalls uint64 - Read_bytes uint64 - Write_bytes uint64 - Cancelled_write_bytes uint64 - Nvcsw uint64 - Nivcsw uint64 - Ac_utimescaled uint64 - Ac_stimescaled uint64 - Cpu_scaled_run_real_total uint64 - Freepages_count uint64 - Freepages_delay_total uint64 -} - -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 31102686730..1e99b936006 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -288,13 +288,6 @@ type IPv6Mreq struct { Interface uint32 } -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -385,11 +378,9 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofLinger = 0x8 - SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 SizeofMsghdr = 0x38 SizeofCmsghdr = 0x10 SizeofInet4Pktinfo = 0xc @@ -430,7 +421,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2c + IFLA_MAX = 0x2b RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -666,9 +657,7 @@ type Sigset_t struct { X__val [16]uint64 } -const RNDGETENTCNT = 0x80045200 - -const PERF_IOC_FLAG_GROUP = 0x1 +const _SC_PAGESIZE = 0x1e type Termios struct { Iflag uint32 @@ -680,111 +669,3 @@ type Termios struct { Ispeed uint32 Ospeed uint32 } - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -type Taskstats struct { - Version uint16 - Pad_cgo_0 [2]byte - Ac_exitcode uint32 - Ac_flag uint8 - Ac_nice uint8 - Pad_cgo_1 [6]byte - Cpu_count uint64 - Cpu_delay_total uint64 - Blkio_count uint64 - Blkio_delay_total uint64 - Swapin_count uint64 - Swapin_delay_total uint64 - 
Cpu_run_real_total uint64 - Cpu_run_virtual_total uint64 - Ac_comm [32]int8 - Ac_sched uint8 - Ac_pad [3]uint8 - Pad_cgo_2 [4]byte - Ac_uid uint32 - Ac_gid uint32 - Ac_pid uint32 - Ac_ppid uint32 - Ac_btime uint32 - Pad_cgo_3 [4]byte - Ac_etime uint64 - Ac_utime uint64 - Ac_stime uint64 - Ac_minflt uint64 - Ac_majflt uint64 - Coremem uint64 - Virtmem uint64 - Hiwater_rss uint64 - Hiwater_vm uint64 - Read_char uint64 - Write_char uint64 - Read_syscalls uint64 - Write_syscalls uint64 - Read_bytes uint64 - Write_bytes uint64 - Cancelled_write_bytes uint64 - Nvcsw uint64 - Nivcsw uint64 - Ac_utimescaled uint64 - Ac_stimescaled uint64 - Cpu_scaled_run_real_total uint64 - Freepages_count uint64 - Freepages_delay_total uint64 -} - -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index d2c1bc2c83d..9a7916cbe8c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -288,13 +288,6 @@ type IPv6Mreq struct { Interface uint32 } -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -383,11 +376,9 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofLinger = 0x8 - SizeofIovec = 0x8 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 SizeofMsghdr = 0x1c SizeofCmsghdr = 0xc SizeofInet4Pktinfo = 0xc @@ -428,7 +419,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2c + IFLA_MAX = 0x2b RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -663,9 +654,7 @@ type Sigset_t struct { X__val [32]uint32 } -const RNDGETENTCNT = 0x40045200 - -const PERF_IOC_FLAG_GROUP = 0x1 +const _SC_PAGESIZE = 0x1e type Termios struct { Iflag uint32 @@ -677,111 +666,3 @@ type Termios struct { Ispeed uint32 Ospeed uint32 } - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -type Taskstats struct { - Version uint16 - Pad_cgo_0 [2]byte - Ac_exitcode uint32 - Ac_flag uint8 - Ac_nice uint8 - Pad_cgo_1 [6]byte - Cpu_count uint64 - Cpu_delay_total uint64 - Blkio_count uint64 - Blkio_delay_total uint64 - Swapin_count uint64 - Swapin_delay_total uint64 - 
Cpu_run_real_total uint64 - Cpu_run_virtual_total uint64 - Ac_comm [32]int8 - Ac_sched uint8 - Ac_pad [3]uint8 - Pad_cgo_2 [4]byte - Ac_uid uint32 - Ac_gid uint32 - Ac_pid uint32 - Ac_ppid uint32 - Ac_btime uint32 - Pad_cgo_3 [4]byte - Ac_etime uint64 - Ac_utime uint64 - Ac_stime uint64 - Ac_minflt uint64 - Ac_majflt uint64 - Coremem uint64 - Virtmem uint64 - Hiwater_rss uint64 - Hiwater_vm uint64 - Read_char uint64 - Write_char uint64 - Read_syscalls uint64 - Write_syscalls uint64 - Read_bytes uint64 - Write_bytes uint64 - Cancelled_write_bytes uint64 - Nvcsw uint64 - Nivcsw uint64 - Ac_utimescaled uint64 - Ac_stimescaled uint64 - Cpu_scaled_run_real_total uint64 - Freepages_count uint64 - Freepages_delay_total uint64 -} - -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index ec7a0cd2751..0b58c64d1c3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -288,13 +288,6 @@ type IPv6Mreq struct { Interface uint32 } -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -385,11 +378,9 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofLinger = 0x8 - SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 SizeofMsghdr = 0x38 SizeofCmsghdr = 0x10 SizeofInet4Pktinfo = 0xc @@ -430,7 +421,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2c + IFLA_MAX = 0x2b RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -668,9 +659,7 @@ type Sigset_t struct { X__val [16]uint64 } -const RNDGETENTCNT = 0x40045200 - -const PERF_IOC_FLAG_GROUP = 0x1 +const _SC_PAGESIZE = 0x1e type Termios struct { Iflag uint32 @@ -682,111 +671,3 @@ type Termios struct { Ispeed uint32 Ospeed uint32 } - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -type Taskstats struct { - Version uint16 - Pad_cgo_0 [2]byte - Ac_exitcode uint32 - Ac_flag uint8 - Ac_nice uint8 - Pad_cgo_1 [6]byte - Cpu_count uint64 - Cpu_delay_total uint64 - Blkio_count uint64 - Blkio_delay_total uint64 - Swapin_count uint64 - Swapin_delay_total uint64 - 
Cpu_run_real_total uint64 - Cpu_run_virtual_total uint64 - Ac_comm [32]int8 - Ac_sched uint8 - Ac_pad [3]uint8 - Pad_cgo_2 [4]byte - Ac_uid uint32 - Ac_gid uint32 - Ac_pid uint32 - Ac_ppid uint32 - Ac_btime uint32 - Pad_cgo_3 [4]byte - Ac_etime uint64 - Ac_utime uint64 - Ac_stime uint64 - Ac_minflt uint64 - Ac_majflt uint64 - Coremem uint64 - Virtmem uint64 - Hiwater_rss uint64 - Hiwater_vm uint64 - Read_char uint64 - Write_char uint64 - Read_syscalls uint64 - Write_syscalls uint64 - Read_bytes uint64 - Write_bytes uint64 - Cancelled_write_bytes uint64 - Nvcsw uint64 - Nivcsw uint64 - Ac_utimescaled uint64 - Ac_stimescaled uint64 - Cpu_scaled_run_real_total uint64 - Freepages_count uint64 - Freepages_delay_total uint64 -} - -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index bbe08d7db76..aa6a6637d7b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -288,13 +288,6 @@ type IPv6Mreq struct { Interface uint32 } -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -385,11 +378,9 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofLinger = 0x8 - SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 SizeofMsghdr = 0x38 SizeofCmsghdr = 0x10 SizeofInet4Pktinfo = 0xc @@ -430,7 +421,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2c + IFLA_MAX = 0x2b RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -668,9 +659,7 @@ type Sigset_t struct { X__val [16]uint64 } -const RNDGETENTCNT = 0x40045200 - -const PERF_IOC_FLAG_GROUP = 0x1 +const _SC_PAGESIZE = 0x1e type Termios struct { Iflag uint32 @@ -682,111 +671,3 @@ type Termios struct { Ispeed uint32 Ospeed uint32 } - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -type Taskstats struct { - Version uint16 - Pad_cgo_0 [2]byte - Ac_exitcode uint32 - Ac_flag uint8 - Ac_nice uint8 - Pad_cgo_1 [6]byte - Cpu_count uint64 - Cpu_delay_total uint64 - Blkio_count uint64 - Blkio_delay_total uint64 - Swapin_count uint64 - Swapin_delay_total uint64 - 
Cpu_run_real_total uint64 - Cpu_run_virtual_total uint64 - Ac_comm [32]int8 - Ac_sched uint8 - Ac_pad [3]uint8 - Pad_cgo_2 [4]byte - Ac_uid uint32 - Ac_gid uint32 - Ac_pid uint32 - Ac_ppid uint32 - Ac_btime uint32 - Pad_cgo_3 [4]byte - Ac_etime uint64 - Ac_utime uint64 - Ac_stime uint64 - Ac_minflt uint64 - Ac_majflt uint64 - Coremem uint64 - Virtmem uint64 - Hiwater_rss uint64 - Hiwater_vm uint64 - Read_char uint64 - Write_char uint64 - Read_syscalls uint64 - Write_syscalls uint64 - Read_bytes uint64 - Write_bytes uint64 - Cancelled_write_bytes uint64 - Nvcsw uint64 - Nivcsw uint64 - Ac_utimescaled uint64 - Ac_stimescaled uint64 - Cpu_scaled_run_real_total uint64 - Freepages_count uint64 - Freepages_delay_total uint64 -} - -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 75ee05ab472..b5ef69cf96e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -288,13 +288,6 @@ type IPv6Mreq struct { Interface uint32 } -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -383,11 +376,9 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofLinger = 0x8 - SizeofIovec = 0x8 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 SizeofMsghdr = 0x1c SizeofCmsghdr = 0xc SizeofInet4Pktinfo = 0xc @@ -428,7 +419,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2c + IFLA_MAX = 0x2b RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -663,9 +654,7 @@ type Sigset_t struct { X__val [32]uint32 } -const RNDGETENTCNT = 0x40045200 - -const PERF_IOC_FLAG_GROUP = 0x1 +const _SC_PAGESIZE = 0x1e type Termios struct { Iflag uint32 @@ -677,111 +666,3 @@ type Termios struct { Ispeed uint32 Ospeed uint32 } - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -type Taskstats struct { - Version uint16 - Pad_cgo_0 [2]byte - Ac_exitcode uint32 - Ac_flag uint8 - Ac_nice uint8 - Pad_cgo_1 [6]byte - Cpu_count uint64 - Cpu_delay_total uint64 - Blkio_count uint64 - Blkio_delay_total uint64 - Swapin_count uint64 - Swapin_delay_total uint64 - 
Cpu_run_real_total uint64 - Cpu_run_virtual_total uint64 - Ac_comm [32]int8 - Ac_sched uint8 - Ac_pad [3]uint8 - Pad_cgo_2 [4]byte - Ac_uid uint32 - Ac_gid uint32 - Ac_pid uint32 - Ac_ppid uint32 - Ac_btime uint32 - Pad_cgo_3 [4]byte - Ac_etime uint64 - Ac_utime uint64 - Ac_stime uint64 - Ac_minflt uint64 - Ac_majflt uint64 - Coremem uint64 - Virtmem uint64 - Hiwater_rss uint64 - Hiwater_vm uint64 - Read_char uint64 - Write_char uint64 - Read_syscalls uint64 - Write_syscalls uint64 - Read_bytes uint64 - Write_bytes uint64 - Cancelled_write_bytes uint64 - Nvcsw uint64 - Nivcsw uint64 - Ac_utimescaled uint64 - Ac_stimescaled uint64 - Cpu_scaled_run_real_total uint64 - Freepages_count uint64 - Freepages_delay_total uint64 -} - -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 30a257f83cc..326b8e30f7e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -289,13 +289,6 @@ type IPv6Mreq struct { Interface uint32 } -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -386,11 +379,9 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofLinger = 0x8 - SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 SizeofMsghdr = 0x38 SizeofCmsghdr = 0x10 SizeofInet4Pktinfo = 0xc @@ -431,7 +422,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2c + IFLA_MAX = 0x2b RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -676,9 +667,7 @@ type Sigset_t struct { X__val [16]uint64 } -const RNDGETENTCNT = 0x40045200 - -const PERF_IOC_FLAG_GROUP = 0x1 +const _SC_PAGESIZE = 0x1e type Termios struct { Iflag uint32 @@ -690,111 +679,3 @@ type Termios struct { Ispeed uint32 Ospeed uint32 } - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -type Taskstats struct { - Version uint16 - Pad_cgo_0 [2]byte - Ac_exitcode uint32 - Ac_flag uint8 - Ac_nice uint8 - Pad_cgo_1 [6]byte - Cpu_count uint64 - Cpu_delay_total uint64 - Blkio_count uint64 - Blkio_delay_total uint64 - Swapin_count uint64 - Swapin_delay_total uint64 - 
Cpu_run_real_total uint64 - Cpu_run_virtual_total uint64 - Ac_comm [32]uint8 - Ac_sched uint8 - Ac_pad [3]uint8 - Pad_cgo_2 [4]byte - Ac_uid uint32 - Ac_gid uint32 - Ac_pid uint32 - Ac_ppid uint32 - Ac_btime uint32 - Pad_cgo_3 [4]byte - Ac_etime uint64 - Ac_utime uint64 - Ac_stime uint64 - Ac_minflt uint64 - Ac_majflt uint64 - Coremem uint64 - Virtmem uint64 - Hiwater_rss uint64 - Hiwater_vm uint64 - Read_char uint64 - Write_char uint64 - Read_syscalls uint64 - Write_syscalls uint64 - Read_bytes uint64 - Write_bytes uint64 - Cancelled_write_bytes uint64 - Nvcsw uint64 - Nivcsw uint64 - Ac_utimescaled uint64 - Ac_stimescaled uint64 - Cpu_scaled_run_real_total uint64 - Freepages_count uint64 - Freepages_delay_total uint64 -} - -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index bebed6f11c8..1309399b000 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -289,13 +289,6 @@ type IPv6Mreq struct { Interface uint32 } -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -386,11 +379,9 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofLinger = 0x8 - SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 SizeofMsghdr = 0x38 SizeofCmsghdr = 0x10 SizeofInet4Pktinfo = 0xc @@ -431,7 +422,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2c + IFLA_MAX = 0x2b RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -676,9 +667,7 @@ type Sigset_t struct { X__val [16]uint64 } -const RNDGETENTCNT = 0x40045200 - -const PERF_IOC_FLAG_GROUP = 0x1 +const _SC_PAGESIZE = 0x1e type Termios struct { Iflag uint32 @@ -690,111 +679,3 @@ type Termios struct { Ispeed uint32 Ospeed uint32 } - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -type Taskstats struct { - Version uint16 - Pad_cgo_0 [2]byte - Ac_exitcode uint32 - Ac_flag uint8 - Ac_nice uint8 - Pad_cgo_1 [6]byte - Cpu_count uint64 - Cpu_delay_total uint64 - Blkio_count uint64 - Blkio_delay_total uint64 - Swapin_count uint64 - Swapin_delay_total uint64 - 
Cpu_run_real_total uint64 - Cpu_run_virtual_total uint64 - Ac_comm [32]uint8 - Ac_sched uint8 - Ac_pad [3]uint8 - Pad_cgo_2 [4]byte - Ac_uid uint32 - Ac_gid uint32 - Ac_pid uint32 - Ac_ppid uint32 - Ac_btime uint32 - Pad_cgo_3 [4]byte - Ac_etime uint64 - Ac_utime uint64 - Ac_stime uint64 - Ac_minflt uint64 - Ac_majflt uint64 - Coremem uint64 - Virtmem uint64 - Hiwater_rss uint64 - Hiwater_vm uint64 - Read_char uint64 - Write_char uint64 - Read_syscalls uint64 - Write_syscalls uint64 - Read_bytes uint64 - Write_bytes uint64 - Cancelled_write_bytes uint64 - Nvcsw uint64 - Nivcsw uint64 - Ac_utimescaled uint64 - Ac_stimescaled uint64 - Cpu_scaled_run_real_total uint64 - Freepages_count uint64 - Freepages_delay_total uint64 -} - -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 286661b35bf..a5c40b8c45b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -288,13 +288,6 @@ type IPv6Mreq struct { Interface uint32 } -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -385,11 +378,9 @@ const ( SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 SizeofLinger = 0x8 - SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 SizeofMsghdr = 0x38 SizeofCmsghdr = 0x10 SizeofInet4Pktinfo = 0xc @@ -430,7 +421,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2c + IFLA_MAX = 0x2b RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -693,9 +684,7 @@ type Sigset_t struct { _ [16]uint64 } -const RNDGETENTCNT = 0x80045200 - -const PERF_IOC_FLAG_GROUP = 0x1 +const _SC_PAGESIZE = 0x1e type Termios struct { Iflag uint32 @@ -707,111 +696,3 @@ type Termios struct { Ispeed uint32 Ospeed uint32 } - -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -type Taskstats struct { - Version uint16 - _ [2]byte - Ac_exitcode uint32 - Ac_flag uint8 - Ac_nice uint8 - _ [6]byte - Cpu_count uint64 - Cpu_delay_total uint64 - Blkio_count uint64 - Blkio_delay_total uint64 - Swapin_count uint64 - Swapin_delay_total uint64 - Cpu_run_real_total uint64 - 
Cpu_run_virtual_total uint64 - Ac_comm [32]int8 - Ac_sched uint8 - Ac_pad [3]uint8 - _ [4]byte - Ac_uid uint32 - Ac_gid uint32 - Ac_pid uint32 - Ac_ppid uint32 - Ac_btime uint32 - _ [4]byte - Ac_etime uint64 - Ac_utime uint64 - Ac_stime uint64 - Ac_minflt uint64 - Ac_majflt uint64 - Coremem uint64 - Virtmem uint64 - Hiwater_rss uint64 - Hiwater_vm uint64 - Read_char uint64 - Write_char uint64 - Read_syscalls uint64 - Write_syscalls uint64 - Read_bytes uint64 - Write_bytes uint64 - Cancelled_write_bytes uint64 - Nvcsw uint64 - Nivcsw uint64 - Ac_utimescaled uint64 - Ac_stimescaled uint64 - Cpu_scaled_run_real_total uint64 - Freepages_count uint64 - Freepages_delay_total uint64 -} - -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 42f99c0a309..caf755fb86c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -382,11 +382,6 @@ type Termios struct { Ospeed int32 } -const ( - AT_FDCWD = -0x64 - AT_SYMLINK_NOFOLLOW = 0x200 -) - type Sysctlnode struct { Flags uint32 Num int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index ff290ba0697..91b4a5305a4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -389,11 +389,6 @@ type Termios struct { Ospeed int32 } -const ( - AT_FDCWD = -0x64 - AT_SYMLINK_NOFOLLOW = 0x200 -) - type Sysctlnode struct { Flags uint32 Num int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 66dbd7c0502..c0758f9d3f7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -387,11 +387,6 @@ type Termios struct { Ospeed int32 } -const ( - AT_FDCWD = -0x64 - AT_SYMLINK_NOFOLLOW = 0x200 -) - type Sysctlnode struct { Flags uint32 Num int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 20fc9f450cb..860a4697961 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -439,8 +439,3 @@ type Termios struct { Ispeed 
int32 Ospeed int32 } - -const ( - AT_FDCWD = -0x64 - AT_SYMLINK_NOFOLLOW = 0x2 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 46fe9490c8f..23c52727f7d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -446,8 +446,3 @@ type Termios struct { Ispeed int32 Ospeed int32 } - -const ( - AT_FDCWD = -0x64 - AT_SYMLINK_NOFOLLOW = 0x2 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go deleted file mode 100644 index 62e1f7c04d1..00000000000 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ /dev/null @@ -1,439 +0,0 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_openbsd.go - -// +build arm,openbsd - -package unix - -const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 -) - -type ( - _C_short int16 - _C_int int32 - _C_long int32 - _C_long_long int64 -) - -type Timespec struct { - Sec int64 - Nsec int32 -} - -type Timeval struct { - Sec int64 - Usec int32 -} - -type Rusage struct { - Utime Timeval - Stime Timeval - Maxrss int32 - Ixrss int32 - Idrss int32 - Isrss int32 - Minflt int32 - Majflt int32 - Nswap int32 - Inblock int32 - Oublock int32 - Msgsnd int32 - Msgrcv int32 - Nsignals int32 - Nvcsw int32 - Nivcsw int32 -} - -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - -const ( - S_IFMT = 0xf000 - S_IFIFO = 0x1000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFBLK = 0x6000 - S_IFREG = 0x8000 - S_IFLNK = 0xa000 - S_IFSOCK = 0xc000 - S_ISUID = 0x800 - S_ISGID = 0x400 - S_ISVTX = 0x200 - S_IRUSR = 0x100 - S_IWUSR = 0x80 - S_IXUSR = 0x40 -) - -type Stat_t struct { - Mode uint32 - Dev int32 - Ino uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Rdev int32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - X__st_birthtim Timespec -} - -type Statfs_t struct { - F_flags uint32 - F_bsize uint32 - F_iosize uint32 - F_blocks uint64 - F_bfree uint64 - F_bavail int64 - F_files uint64 - F_ffree uint64 - F_favail int64 - F_syncwrites uint64 - F_syncreads uint64 - F_asyncwrites uint64 - F_asyncreads uint64 - F_fsid Fsid - F_namemax uint32 - F_owner uint32 - F_ctime uint64 - F_fstypename [16]uint8 - F_mntonname [90]uint8 - F_mntfromname [90]uint8 - F_mntfromspec [90]uint8 - Pad_cgo_0 [2]byte - Mount_info [160]byte -} - -type Flock_t struct { - Start int64 - Len int64 - Pid int32 - Type int16 - Whence int16 -} - -type Dirent struct { - Fileno uint64 - Off int64 - Reclen uint16 - Type uint8 - Namlen uint8 - X__d_padding [4]uint8 - Name [256]uint8 -} - -type Fsid struct { - Val [2]int32 -} - -type RawSockaddrInet4 struct { - Len uint8 - Family uint8 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]int8 -} - -type RawSockaddrInet6 struct { - Len uint8 - Family uint8 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Len uint8 - Family uint8 - Path [104]int8 -} - -type RawSockaddrDatalink struct { - Len uint8 - Family uint8 - Index uint16 - Type uint8 - Nlen uint8 - Alen uint8 - Slen uint8 - Data [24]int8 -} - -type RawSockaddr struct { - Len uint8 - Family uint8 - Data [14]int8 -} - -type RawSockaddrAny struct { - Addr RawSockaddr - Pad [92]int8 -} - -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - -type Iovec struct { - 
Base *byte - Len uint32 -} - -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type Msghdr struct { - Name *byte - Namelen uint32 - Iov *Iovec - Iovlen uint32 - Control *byte - Controllen uint32 - Flags int32 -} - -type Cmsghdr struct { - Len uint32 - Level int32 - Type int32 -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Filt [8]uint32 -} - -const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x6c - SizeofSockaddrUnix = 0x6a - SizeofSockaddrDatalink = 0x20 - SizeofLinger = 0x8 - SizeofIPMreq = 0x8 - SizeofIPv6Mreq = 0x14 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 -) - -const ( - PTRACE_TRACEME = 0x0 - PTRACE_CONT = 0x7 - PTRACE_KILL = 0x8 -) - -type Kevent_t struct { - Ident uint32 - Filter int16 - Flags uint16 - Fflags uint32 - Data int64 - Udata *byte -} - -type FdSet struct { - Bits [32]uint32 -} - -const ( - SizeofIfMsghdr = 0x98 - SizeofIfData = 0x80 - SizeofIfaMsghdr = 0x18 - SizeofIfAnnounceMsghdr = 0x1a - SizeofRtMsghdr = 0x60 - SizeofRtMetrics = 0x38 -) - -type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Hdrlen uint16 - Index uint16 - Tableid uint16 - Pad1 uint8 - Pad2 uint8 - Addrs int32 - Flags int32 - Xflags int32 - Data IfData -} - -type IfData struct { - Type uint8 - Addrlen uint8 - Hdrlen uint8 - Link_state uint8 - Mtu uint32 - Metric uint32 - Pad uint32 - Baudrate uint64 - Ipackets uint64 - Ierrors uint64 - Opackets uint64 - Oerrors uint64 - Collisions uint64 - Ibytes uint64 - Obytes uint64 - Imcasts uint64 - Omcasts uint64 - Iqdrops uint64 - Noproto uint64 - Capabilities uint32 - Lastchange Timeval -} - -type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Hdrlen uint16 - Index uint16 - Tableid uint16 - Pad1 uint8 - Pad2 uint8 - Addrs int32 - Flags int32 - Metric int32 -} - -type IfAnnounceMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Hdrlen uint16 - Index uint16 - What uint16 - Name [16]uint8 -} - -type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Hdrlen uint16 - Index uint16 - Tableid uint16 - Priority uint8 - Mpls uint8 - Addrs int32 - Flags int32 - Fmask int32 - Pid int32 - Seq int32 - Errno int32 - Inits uint32 - Rmx RtMetrics -} - -type RtMetrics struct { - Pksent uint64 - Expire int64 - Locks uint32 - Mtu uint32 - Refcnt uint32 - Hopcount uint32 - Recvpipe uint32 - Sendpipe uint32 - Ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Pad uint32 -} - -type Mclpool struct{} - -const ( - SizeofBpfVersion = 0x4 - SizeofBpfStat = 0x8 - SizeofBpfProgram = 0x8 - SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 -) - -type BpfVersion struct { - Major uint16 - Minor uint16 -} - -type BpfStat struct { - Recv uint32 - Drop uint32 -} - -type BpfProgram struct { - Len uint32 - Insns *BpfInsn -} - -type BpfInsn struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type BpfHdr struct { - Tstamp BpfTimeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [2]byte -} - -type BpfTimeval struct { - Sec uint32 - Usec uint32 -} - -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]uint8 - Ispeed int32 - Ospeed int32 -} - -const ( - AT_FDCWD = -0x64 - AT_SYMLINK_NOFOLLOW = 0x2 -) diff 
--git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index a979a33d51e..92336f9f923 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -413,6 +413,8 @@ type BpfHdr struct { Pad_cgo_0 [2]byte } +const _SC_PAGESIZE = 0xb + type Termios struct { Iflag uint32 Oflag uint32 From 6818387a0ab78cc7750948f1e5301b7b5fc24174 Mon Sep 17 00:00:00 2001 From: Satyam Zode Date: Wed, 27 Sep 2017 15:20:53 +0530 Subject: [PATCH 05/68] Add CHANGELOG for openebs-provisioner --- openebs/CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 openebs/CHANGELOG.md diff --git a/openebs/CHANGELOG.md b/openebs/CHANGELOG.md new file mode 100644 index 00000000000..8f03098fdc5 --- /dev/null +++ b/openebs/CHANGELOG.md @@ -0,0 +1,5 @@ +#v0.1.0 +- Dynamic Volume Provisioner for K8s - hyper converged cloud native storage - OpenEBS. +- Support for creating and deleting of OpenEBS Volumes - for the PVCs pointing to Storage Class (openebs.io/provisioner-iscsi) +- Support for attaching the OpenEBS storage to Pods via iSCSI +- Satyam Zode From b57b41a278c57701dde7cd501325bb47d8c6dd30 Mon Sep 17 00:00:00 2001 From: Kenjiro Nakayama Date: Wed, 27 Sep 2017 10:35:30 +0000 Subject: [PATCH 06/68] ceph/rbd: use AccessModesContainedInAll() in lib/util --- ceph/rbd/pkg/provision/helper.go | 42 ----------------------------- ceph/rbd/pkg/provision/provision.go | 3 ++- 2 files changed, 2 insertions(+), 43 deletions(-) delete mode 100644 ceph/rbd/pkg/provision/helper.go diff --git a/ceph/rbd/pkg/provision/helper.go b/ceph/rbd/pkg/provision/helper.go deleted file mode 100644 index a1540d58338..00000000000 --- a/ceph/rbd/pkg/provision/helper.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Helper utlities copied from kubernetes/pkg/volume. -// TODO: Merge this into github.com/kubernetes-incubator/external-storage/lib/{util or helper} if possible. 
- -package provision - -import "k8s.io/api/core/v1" - -// AccessModesContains returns whether the requested mode is contained by modes -func AccessModesContains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool { - for _, m := range modes { - if m == mode { - return true - } - } - return false -} - -// AccessModesContainedInAll returns whether all of the requested modes are contained by modes -func AccessModesContainedInAll(indexedModes []v1.PersistentVolumeAccessMode, requestedModes []v1.PersistentVolumeAccessMode) bool { - for _, mode := range requestedModes { - if !AccessModesContains(indexedModes, mode) { - return false - } - } - return true -} diff --git a/ceph/rbd/pkg/provision/provision.go b/ceph/rbd/pkg/provision/provision.go index 2cb894a2a5e..622efdb38bb 100644 --- a/ceph/rbd/pkg/provision/provision.go +++ b/ceph/rbd/pkg/provision/provision.go @@ -23,6 +23,7 @@ import ( "github.com/golang/glog" "github.com/kubernetes-incubator/external-storage/lib/controller" + "github.com/kubernetes-incubator/external-storage/lib/util" "github.com/pborman/uuid" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -90,7 +91,7 @@ func (p *rbdProvisioner) getAccessModes() []v1.PersistentVolumeAccessMode { // Provision creates a storage asset and returns a PV object representing it. func (p *rbdProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error) { - if !AccessModesContainedInAll(p.getAccessModes(), options.PVC.Spec.AccessModes) { + if !util.AccessModesContainedInAll(p.getAccessModes(), options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", options.PVC.Spec.AccessModes, p.getAccessModes()) } if options.PVC.Spec.Selector != nil { From 27daa92be8d66c7bd2d1320345211578c844852e Mon Sep 17 00:00:00 2001 From: Ian Chakeres Date: Sun, 24 Sep 2017 18:24:41 -0700 Subject: [PATCH 07/68] [local-volume] Best practices regarding provisioner discovery --- local-volume/README.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/local-volume/README.md b/local-volume/README.md index f684443fb3c..ad5dbce0af9 100644 --- a/local-volume/README.md +++ b/local-volume/README.md @@ -231,6 +231,21 @@ go run hack/e2e.go -- -v --test --test_args="--ginkgo.focus=\[Feature:LocalPersi * Avoid recreating nodes with the same node name while there are still old PVs with that node's affinity specified. Otherwise, the system could think that the new node contains the old PVs. +* While leveraging the local PV provisioner, race conditions can occur when + an admin is adding new volumes and they are being discovered. To avoid + this situation, in the discovery directories we recommend utilizing bind + mounts for mount points and symlinks for raw block volumes. For example, + if the discovery directory is /mnt/volumes and we want the provisioner + to expose /dev/sda1 as a volume named vol1, then we would bind mount + /dev/sda1 at /mnt/volumes/vol1. +* For volumes with a filesystem, its recommended to utilize their UUID (e.g. + the output from `ls -l /dev/disk/by-uuid`) both in fstab mounting logic and + in the directory name representing that volume. This practice ensures + that the wrong local volume is not mistakenly mounted, even if its mountpoint + changes (e.g. if /dev/sda1 becomes /dev/sdb1 when a new disk is added). 
+ Additionally, this practice will ensure that if another node with the + same name is created, that any volumes on that node are unique and not + mistaken for a volume on another node with the same name. ### Deleting/removing the underlying volume From 209f83c15f645a443249d7f3aac4b147cf5c4b7d Mon Sep 17 00:00:00 2001 From: Adam Litke Date: Wed, 20 Sep 2017 10:22:26 -0400 Subject: [PATCH 08/68] Reorganize code into packages Split the code into two packages: provisioner and volumeservice, and a command: cinder-provisioner Signed-off-by: Adam Litke --- openstack/standalone-cinder/Dockerfile | 4 +- openstack/standalone-cinder/Makefile | 4 +- .../cinder-provisioner/cinder-provisioner.go | 86 ++++++++++++ .../provisioner/iscsi.go} | 29 ++-- .../pkg/provisioner/mapper.go | 83 ++++++++++++ .../{ => pkg/provisioner}/provisioner.go | 128 +++++------------- .../{mapper-rbd.go => pkg/provisioner/rbd.go} | 13 +- .../volumeservice/actions.go} | 119 ++++++---------- .../volumeservice/connection.go} | 6 +- 9 files changed, 277 insertions(+), 195 deletions(-) create mode 100644 openstack/standalone-cinder/cmd/cinder-provisioner/cinder-provisioner.go rename openstack/standalone-cinder/{mapper-iscsi.go => pkg/provisioner/iscsi.go} (74%) create mode 100644 openstack/standalone-cinder/pkg/provisioner/mapper.go rename openstack/standalone-cinder/{ => pkg/provisioner}/provisioner.go (54%) rename openstack/standalone-cinder/{mapper-rbd.go => pkg/provisioner/rbd.go} (81%) rename openstack/standalone-cinder/{mapper.go => pkg/volumeservice/actions.go} (53%) rename openstack/standalone-cinder/{openstack.go => pkg/volumeservice/connection.go} (96%) diff --git a/openstack/standalone-cinder/Dockerfile b/openstack/standalone-cinder/Dockerfile index b0d92af14c5..e253bc8a0a5 100644 --- a/openstack/standalone-cinder/Dockerfile +++ b/openstack/standalone-cinder/Dockerfile @@ -14,6 +14,6 @@ FROM alpine:latest -COPY standalone-cinder / +COPY cinder-provisioner / -ENTRYPOINT ["/standalone-cinder"] +ENTRYPOINT ["/cinder-provisioner"] diff --git a/openstack/standalone-cinder/Makefile b/openstack/standalone-cinder/Makefile index ee2983f3242..6fa1fe84118 100644 --- a/openstack/standalone-cinder/Makefile +++ b/openstack/standalone-cinder/Makefile @@ -28,7 +28,7 @@ MUTABLE_IMAGE = $(REGISTRY)standalone-cinder-provisioner:latest all: build build: - CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o standalone-cinder + CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o cinder-provisioner ./cmd/cinder-provisioner container: build docker build -t $(MUTABLE_IMAGE) . @@ -36,5 +36,5 @@ container: build clean: - -rm standalone-cinder + -rm cinder-provisioner diff --git a/openstack/standalone-cinder/cmd/cinder-provisioner/cinder-provisioner.go b/openstack/standalone-cinder/cmd/cinder-provisioner/cinder-provisioner.go new file mode 100644 index 00000000000..6b93b9153de --- /dev/null +++ b/openstack/standalone-cinder/cmd/cinder-provisioner/cinder-provisioner.go @@ -0,0 +1,86 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + + "github.com/golang/glog" + "github.com/kubernetes-incubator/external-storage/lib/controller" + "github.com/kubernetes-incubator/external-storage/openstack/standalone-cinder/pkg/provisioner" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +var ( + master = flag.String("master", "", "Master URL") + kubeconfig = flag.String("kubeconfig", "", "Absolute path to the kubeconfig") + id = flag.String("id", "", "Unique provisioner identity") + cloudconfig = flag.String("cloudconfig", "", "Path to OpenStack config file") +) + +func main() { + flag.Parse() + flag.Set("logtostderr", "true") + + var config *rest.Config + var err error + if *master != "" || *kubeconfig != "" { + config, err = clientcmd.BuildConfigFromFlags(*master, *kubeconfig) + } else { + config, err = rest.InClusterConfig() + } + prID := provisioner.ProvisionerName + if *id != "" { + prID = *id + } + if err != nil { + glog.Fatalf("Failed to create config: %v", err) + } + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + glog.Fatalf("Failed to create client: %v", err) + } + + // The controller needs to know what the server version is because out-of-tree + // provisioners aren't officially supported until 1.5 + serverVersion, err := clientset.Discovery().ServerVersion() + if err != nil { + glog.Fatalf("Error getting server version: %v", err) + } + + // Create the provisioner: it implements the Provisioner interface expected by + // the controller + cinderProvisioner, err := provisioner.NewCinderProvisioner(clientset, prID, *cloudconfig) + if err != nil { + glog.Fatalf("Error creating Cinder provisioner: %v", err) + } + + // Start the provision controller which will dynamically provision cinder + // PVs + pc := controller.NewProvisionController( + clientset, + provisioner.ProvisionerName, + cinderProvisioner, + serverVersion.GitVersion, + ) + + pc.Run(wait.NeverStop) +} diff --git a/openstack/standalone-cinder/mapper-iscsi.go b/openstack/standalone-cinder/pkg/provisioner/iscsi.go similarity index 74% rename from openstack/standalone-cinder/mapper-iscsi.go rename to openstack/standalone-cinder/pkg/provisioner/iscsi.go index 2ece6503f76..902d0bd92df 100644 --- a/openstack/standalone-cinder/mapper-iscsi.go +++ b/openstack/standalone-cinder/pkg/provisioner/iscsi.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package provisioner import ( "github.com/golang/glog" @@ -23,15 +23,14 @@ import ( ) const iscsiType = "iscsi" -const initiatorName = "iqn.1994-05.com.redhat:a13fc3d1cc22" type iscsiMapper struct { volumeMapper } func getChapSecretName(ctx provisionCtx) string { - if ctx.connection.Data.AuthMethod == "CHAP" { - return ctx.options.PVName + "-secret" + if ctx.Connection.Data.AuthMethod == "CHAP" { + return ctx.Options.PVName + "-secret" } return "" } @@ -40,9 +39,9 @@ func (m *iscsiMapper) BuildPVSource(ctx provisionCtx) (*v1.PersistentVolumeSourc ret := &v1.PersistentVolumeSource{ ISCSI: &v1.ISCSIVolumeSource{ // TODO: Need some way to specify the initiator name - TargetPortal: ctx.connection.Data.TargetPortal, - IQN: ctx.connection.Data.TargetIqn, - Lun: ctx.connection.Data.TargetLun, + TargetPortal: ctx.Connection.Data.TargetPortal, + IQN: ctx.Connection.Data.TargetIqn, + Lun: ctx.Connection.Data.TargetLun, SessionCHAPAuth: false, }, } @@ -69,12 +68,12 @@ func (m *iscsiMapper) AuthSetup(ctx provisionCtx) error { }, Type: "kubernetes.io/iscsi-chap", Data: map[string][]byte{ - "node.session.auth.username": []byte(ctx.connection.Data.AuthUsername), - "node.session.auth.password": []byte(ctx.connection.Data.AuthPassword), + "node.session.auth.username": []byte(ctx.Connection.Data.AuthUsername), + "node.session.auth.password": []byte(ctx.Connection.Data.AuthPassword), }, } - namespace := ctx.options.PVC.Namespace - _, err := ctx.p.client.CoreV1().Secrets(namespace).Create(secret) + namespace := ctx.Options.PVC.Namespace + _, err := ctx.P.Client.CoreV1().Secrets(namespace).Create(secret) if err != nil { glog.Errorf("Failed to create chap secret in namespace %s: %v", namespace, err) return err @@ -85,14 +84,14 @@ func (m *iscsiMapper) AuthSetup(ctx provisionCtx) error { func (m *iscsiMapper) AuthTeardown(ctx deleteCtx) error { // Delete the CHAP credentials - if ctx.pv.Spec.ISCSI.SecretRef == nil { + if ctx.PV.Spec.ISCSI.SecretRef == nil { glog.V(3).Info("No associated secret to delete") return nil } - secretName := ctx.pv.Spec.ISCSI.SecretRef.Name - secretNamespace := ctx.pv.Spec.ClaimRef.Namespace - err := ctx.p.client.CoreV1().Secrets(secretNamespace).Delete(secretName, nil) + secretName := ctx.PV.Spec.ISCSI.SecretRef.Name + secretNamespace := ctx.PV.Spec.ClaimRef.Namespace + err := ctx.P.Client.CoreV1().Secrets(secretNamespace).Delete(secretName, nil) if err != nil { glog.Errorf("Failed to remove secret %s from namespace %s: %v", secretName, secretNamespace, err) return err diff --git a/openstack/standalone-cinder/pkg/provisioner/mapper.go b/openstack/standalone-cinder/pkg/provisioner/mapper.go new file mode 100644 index 00000000000..0c0436e8a43 --- /dev/null +++ b/openstack/standalone-cinder/pkg/provisioner/mapper.go @@ -0,0 +1,83 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package provisioner + +import ( + "errors" + "fmt" + + "github.com/golang/glog" + "github.com/kubernetes-incubator/external-storage/openstack/standalone-cinder/pkg/volumeservice" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type volumeMapper interface { + BuildPVSource(ctx provisionCtx) (*v1.PersistentVolumeSource, error) + AuthSetup(ctx provisionCtx) error + AuthTeardown(ctx deleteCtx) error +} + +func newVolumeMapperFromConnection(conn volumeservice.VolumeConnection) (volumeMapper, error) { + switch conn.DriverVolumeType { + default: + msg := fmt.Sprintf("Unsupported persistent volume type: %s", conn.DriverVolumeType) + return nil, errors.New(msg) + case iscsiType: + return new(iscsiMapper), nil + case rbdType: + return new(rbdMapper), nil + } +} + +func newVolumeMapperFromPV(ctx deleteCtx) (volumeMapper, error) { + if ctx.PV.Spec.ISCSI != nil { + return new(iscsiMapper), nil + } else if ctx.PV.Spec.RBD != nil { + return new(rbdMapper), nil + } else { + return nil, errors.New("Unsupported persistent volume source") + } +} + +func buildPV(m volumeMapper, ctx provisionCtx, volumeID string) (*v1.PersistentVolume, error) { + pvSource, err := m.BuildPVSource(ctx) + if err != nil { + glog.Errorf("Failed to build PV Source element: %v", err) + return nil, err + } + + pv := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: ctx.Options.PVName, + Namespace: ctx.Options.PVC.Namespace, + Annotations: map[string]string{ + ProvisionerIDAnn: ctx.P.Identity, + CinderVolumeID: volumeID, + }, + }, + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeReclaimPolicy: ctx.Options.PersistentVolumeReclaimPolicy, + AccessModes: ctx.Options.PVC.Spec.AccessModes, + Capacity: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): ctx.Options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)], + }, + PersistentVolumeSource: *pvSource, + }, + } + return pv, nil +} diff --git a/openstack/standalone-cinder/provisioner.go b/openstack/standalone-cinder/pkg/provisioner/provisioner.go similarity index 54% rename from openstack/standalone-cinder/provisioner.go rename to openstack/standalone-cinder/pkg/provisioner/provisioner.go index 320bb1a747d..ad4878afb09 100644 --- a/openstack/standalone-cinder/provisioner.go +++ b/openstack/standalone-cinder/pkg/provisioner/provisioner.go @@ -14,65 +14,67 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package provisioner import ( "errors" - "flag" "fmt" "github.com/golang/glog" "github.com/gophercloud/gophercloud" "github.com/kubernetes-incubator/external-storage/lib/controller" - + "github.com/kubernetes-incubator/external-storage/openstack/standalone-cinder/pkg/volumeservice" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" ) const ( - provisionerName = "openstack.org/standalone-cinder" - provisionerIDAnn = "standaloneCinderProvisionerIdentity" - cinderVolumeID = "cinderVolumeId" + // ProvisionerName is the unique name of this provisioner + ProvisionerName = "openstack.org/standalone-cinder" + + // ProvisionerIDAnn is an annotation to identify a particular instance of this provisioner + ProvisionerIDAnn = "standaloneCinderProvisionerIdentity" + + // CinderVolumeID is an annotation to store the ID of the associated cinder volume + CinderVolumeID = "cinderVolumeId" ) type cinderProvisioner struct { // Openstack cinder client - volumeService *gophercloud.ServiceClient + VolumeService *gophercloud.ServiceClient // Kubernetes Client. Use to create secret - client kubernetes.Interface + Client kubernetes.Interface // Identity of this cinderProvisioner, generated. Used to identify "this" // provisioner's PVs. - identity string + Identity string } -func newCinderProvisioner(client kubernetes.Interface, id, configFilePath string) (controller.Provisioner, error) { - volumeService, err := getVolumeService(configFilePath) +// NewCinderProvisioner returns a Provisioner that creates volumes using a +// standalone cinder instance and produces PersistentVolumes that use native +// kubernetes PersistentVolumeSources. +func NewCinderProvisioner(client kubernetes.Interface, id, configFilePath string) (controller.Provisioner, error) { + volumeService, err := volumeservice.GetVolumeService(configFilePath) if err != nil { return nil, fmt.Errorf("failed to get volume service: %v", err) } return &cinderProvisioner{ - volumeService: volumeService, - client: client, - identity: id, + VolumeService: volumeService, + Client: client, + Identity: id, }, nil } -var _ controller.Provisioner = &cinderProvisioner{} - type provisionCtx struct { - p *cinderProvisioner - options controller.VolumeOptions - connection volumeConnection + P *cinderProvisioner + Options controller.VolumeOptions + Connection volumeservice.VolumeConnection } type deleteCtx struct { - p *cinderProvisioner - pv *v1.PersistentVolume + P *cinderProvisioner + PV *v1.PersistentVolume } // Provision creates a storage asset and returns a PV object representing it. @@ -81,26 +83,26 @@ func (p *cinderProvisioner) Provision(options controller.VolumeOptions) (*v1.Per return nil, fmt.Errorf("claim Selector is not supported") } - volumeID, err := createCinderVolume(p, options) + volumeID, err := volumeservice.CreateCinderVolume(p.VolumeService, options) if err != nil { glog.Errorf("Failed to create volume") return nil, err } - err = waitForAvailableCinderVolume(p, volumeID) + err = volumeservice.WaitForAvailableCinderVolume(p.VolumeService, volumeID) if err != nil { glog.Errorf("Volume did not become available") return nil, err } - err = reserveCinderVolume(p, volumeID) + err = volumeservice.ReserveCinderVolume(p.VolumeService, volumeID) if err != nil { // TODO: Create placeholder PV? 
glog.Errorf("Failed to reserve volume: %v", err) return nil, err } - connection, err := connectCinderVolume(p, volumeID) + connection, err := volumeservice.ConnectCinderVolume(p.VolumeService, volumeID) if err != nil { // TODO: Create placeholder PV? glog.Errorf("Failed to connect volume: %v", err) @@ -135,11 +137,11 @@ func (p *cinderProvisioner) Provision(options controller.VolumeOptions) (*v1.Per // Delete removes the storage asset that was created by Provision represented // by the given PV. func (p *cinderProvisioner) Delete(pv *v1.PersistentVolume) error { - ann, ok := pv.Annotations[provisionerIDAnn] + ann, ok := pv.Annotations[ProvisionerIDAnn] if !ok { return errors.New("identity annotation not found on PV") } - if ann != p.identity { + if ann != p.Identity { return &controller.IgnoredError{ Reason: "identity annotation on PV does not match ours", } @@ -147,9 +149,9 @@ func (p *cinderProvisioner) Delete(pv *v1.PersistentVolume) error { // TODO when beta is removed, have to check kube version and pick v1/beta // accordingly: maybe the controller lib should offer a function for that - volumeID, ok := pv.Annotations[cinderVolumeID] + volumeID, ok := pv.Annotations[CinderVolumeID] if !ok { - return errors.New(cinderVolumeID + " annotation not found on PV") + return errors.New(CinderVolumeID + " annotation not found on PV") } ctx := deleteCtx{p, pv} @@ -160,19 +162,19 @@ func (p *cinderProvisioner) Delete(pv *v1.PersistentVolume) error { mapper.AuthTeardown(ctx) - err = disconnectCinderVolume(p, volumeID) + err = volumeservice.DisconnectCinderVolume(p.VolumeService, volumeID) if err != nil { return err } - err = unreserveCinderVolume(p, volumeID) + err = volumeservice.UnreserveCinderVolume(p.VolumeService, volumeID) if err != nil { // TODO: Create placeholder PV? 
glog.Errorf("Failed to unreserve volume: %v", err) return err } - err = deleteCinderVolume(p, volumeID) + err = volumeservice.DeleteCinderVolume(p.VolumeService, volumeID) if err != nil { return err } @@ -180,59 +182,3 @@ func (p *cinderProvisioner) Delete(pv *v1.PersistentVolume) error { glog.V(2).Infof("Successfully deleted cinder volume %s", volumeID) return nil } - -var ( - master = flag.String("master", "", "Master URL") - kubeconfig = flag.String("kubeconfig", "", "Absolute path to the kubeconfig") - id = flag.String("id", "", "Unique provisioner identity") - cloudconfig = flag.String("cloudconfig", "", "Path to OpenStack config file") -) - -func main() { - flag.Parse() - flag.Set("logtostderr", "true") - - var config *rest.Config - var err error - if *master != "" || *kubeconfig != "" { - config, err = clientcmd.BuildConfigFromFlags(*master, *kubeconfig) - } else { - config, err = rest.InClusterConfig() - } - prID := provisionerName - if *id != "" { - prID = *id - } - if err != nil { - glog.Fatalf("Failed to create config: %v", err) - } - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - glog.Fatalf("Failed to create client: %v", err) - } - - // The controller needs to know what the server version is because out-of-tree - // provisioners aren't officially supported until 1.5 - serverVersion, err := clientset.Discovery().ServerVersion() - if err != nil { - glog.Fatalf("Error getting server version: %v", err) - } - - // Create the provisioner: it implements the Provisioner interface expected by - // the controller - cinderProvisioner, err := newCinderProvisioner(clientset, prID, *cloudconfig) - if err != nil { - glog.Fatalf("Error creating Cinder provisioner: %v", err) - } - - // Start the provision controller which will dynamically provision cinder - // PVs - pc := controller.NewProvisionController( - clientset, - provisionerName, - cinderProvisioner, - serverVersion.GitVersion, - ) - - pc.Run(wait.NeverStop) -} diff --git a/openstack/standalone-cinder/mapper-rbd.go b/openstack/standalone-cinder/pkg/provisioner/rbd.go similarity index 81% rename from openstack/standalone-cinder/mapper-rbd.go rename to openstack/standalone-cinder/pkg/provisioner/rbd.go index 01166e0d449..82d35e605b3 100644 --- a/openstack/standalone-cinder/mapper-rbd.go +++ b/openstack/standalone-cinder/pkg/provisioner/rbd.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package provisioner import ( "errors" @@ -22,6 +22,7 @@ import ( "strings" "github.com/golang/glog" + "github.com/kubernetes-incubator/external-storage/openstack/standalone-cinder/pkg/volumeservice" "k8s.io/api/core/v1" ) @@ -31,7 +32,7 @@ type rbdMapper struct { volumeMapper } -func getMonitors(conn volumeConnection) []string { +func getMonitors(conn volumeservice.VolumeConnection) []string { if len(conn.Data.Hosts) != len(conn.Data.Ports) { glog.Errorf("Error parsing rbd connection info: 'hosts' and 'ports' have different lengths") return nil @@ -44,15 +45,15 @@ func getMonitors(conn volumeConnection) []string { } func getRbdSecretName(ctx provisionCtx) string { - return fmt.Sprintf("%s-cephx-secret", *ctx.options.PVC.Spec.StorageClassName) + return fmt.Sprintf("%s-cephx-secret", *ctx.Options.PVC.Spec.StorageClassName) } func (m *rbdMapper) BuildPVSource(ctx provisionCtx) (*v1.PersistentVolumeSource, error) { - mons := getMonitors(ctx.connection) + mons := getMonitors(ctx.Connection) if mons == nil { return nil, errors.New("No monitors could be parsed from connection info") } - splitName := strings.SplitN(ctx.connection.Data.Name, "/", 2) + splitName := strings.SplitN(ctx.Connection.Data.Name, "/", 2) if len(splitName) != 2 { return nil, errors.New("Field 'name' cannot be split into pool and image") } @@ -62,7 +63,7 @@ func (m *rbdMapper) BuildPVSource(ctx provisionCtx) (*v1.PersistentVolumeSource, CephMonitors: mons, RBDPool: splitName[0], RBDImage: splitName[1], - RadosUser: ctx.connection.Data.AuthUsername, + RadosUser: ctx.Connection.Data.AuthUsername, SecretRef: &v1.LocalObjectReference{ Name: getRbdSecretName(ctx), }, diff --git a/openstack/standalone-cinder/mapper.go b/openstack/standalone-cinder/pkg/volumeservice/actions.go similarity index 53% rename from openstack/standalone-cinder/mapper.go rename to openstack/standalone-cinder/pkg/volumeservice/actions.go index 8b4da8a1f4d..11b0cde32dd 100644 --- a/openstack/standalone-cinder/mapper.go +++ b/openstack/standalone-cinder/pkg/volumeservice/actions.go @@ -14,24 +14,28 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package volumeservice import ( - "errors" "fmt" "strings" "time" "github.com/golang/glog" + "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions" volumes_v2 "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" "github.com/kubernetes-incubator/external-storage/lib/controller" "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" ) -type volumeConnectionDetails struct { +const initiatorName = "iqn.1994-05.com.redhat:a13fc3d1cc22" + +// VolumeConnectionDetails represent the type-specific values for a given +// DriverVolumeType. Depending on the volume type, fields may be absent or +// have a semantically different meaning. 
+type VolumeConnectionDetails struct { VolumeID string `json:"volume_id"` Name string `json:"name"` @@ -49,16 +53,19 @@ type volumeConnectionDetails struct { Ports []string `json:"ports"` } -type volumeConnection struct { +// VolumeConnection represents the connection information returned from the +// cinder os-initialize_connection API call +type VolumeConnection struct { DriverVolumeType string `json:"driver_volume_type"` - Data volumeConnectionDetails `json:"data"` + Data VolumeConnectionDetails `json:"data"` } type rcvVolumeConnection struct { - ConnectionInfo volumeConnection `json:"connection_info"` + ConnectionInfo VolumeConnection `json:"connection_info"` } -func createCinderVolume(p *cinderProvisioner, options controller.VolumeOptions) (string, error) { +// CreateCinderVolume creates a new volume in cinder according to the PVC specifications +func CreateCinderVolume(vs *gophercloud.ServiceClient, options controller.VolumeOptions) (string, error) { name := fmt.Sprintf("cinder-dynamic-pvc-%s", uuid.NewUUID()) capacity := options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] sizeBytes := capacity.Value() @@ -86,7 +93,7 @@ func createCinderVolume(p *cinderProvisioner, options controller.VolumeOptions) AvailabilityZone: availability, } - vol, err := volumes_v2.Create(p.volumeService, &opts).Extract() + vol, err := volumes_v2.Create(vs, &opts).Extract() if err != nil { glog.Errorf("Failed to create a %d GiB volume: %v", sizeGB, err) @@ -97,7 +104,10 @@ func createCinderVolume(p *cinderProvisioner, options controller.VolumeOptions) return vol.ID, nil } -func waitForAvailableCinderVolume(p *cinderProvisioner, volumeID string) error { +// WaitForAvailableCinderVolume waits for a newly created cinder volume to +// become available. The connection information cannot be retrieved from cinder +// until the volume is available. +func WaitForAvailableCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { // TODO: Implement proper polling instead of brain-dead timers c := make(chan error) go time.AfterFunc(5*time.Second, func() { @@ -106,34 +116,42 @@ func waitForAvailableCinderVolume(p *cinderProvisioner, volumeID string) error { return <-c } -func reserveCinderVolume(p *cinderProvisioner, volumeID string) error { - return volumeactions.Reserve(p.volumeService, volumeID).ExtractErr() +// ReserveCinderVolume marks the volume as 'Attaching' in cinder. This prevents +// the volume from being used for another purpose. +func ReserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { + return volumeactions.Reserve(vs, volumeID).ExtractErr() } -func connectCinderVolume(p *cinderProvisioner, volumeID string) (volumeConnection, error) { +// ConnectCinderVolume retrieves connection information for a cinder volume. +// Depending on the type of volume, cinder may perform setup on a storage server +// such as mapping a LUN to a particular ISCSI initiator. 
+func ConnectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) (VolumeConnection, error) { opt := volumeactions.InitializeConnectionOpts{ Host: "localhost", IP: "127.0.0.1", Initiator: initiatorName, } var rcv rcvVolumeConnection - err := volumeactions.InitializeConnection(p.volumeService, volumeID, &opt).ExtractInto(&rcv) + err := volumeactions.InitializeConnection(vs, volumeID, &opt).ExtractInto(&rcv) if err != nil { glog.Errorf("failed to initialize connection :%v", err) - return volumeConnection{}, err + return VolumeConnection{}, err } glog.V(3).Infof("Received connection info: %v", rcv) return rcv.ConnectionInfo, nil } -func disconnectCinderVolume(p *cinderProvisioner, volumeID string) error { +// DisconnectCinderVolume removes a connection to a cinder volume. Depending on +// the volume type, this may cause cinder to clean up the connection at a +// storage server (i.e. remove a LUN mapping). +func DisconnectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { opt := volumeactions.TerminateConnectionOpts{ Host: "localhost", IP: "127.0.0.1", Initiator: initiatorName, } - err := volumeactions.TerminateConnection(p.volumeService, volumeID, &opt).Result.Err + err := volumeactions.TerminateConnection(vs, volumeID, &opt).Result.Err if err != nil { glog.Errorf("Failed to terminate connection to volume %s: %v", volumeID, err) @@ -143,71 +161,18 @@ func disconnectCinderVolume(p *cinderProvisioner, volumeID string) error { return nil } -func unreserveCinderVolume(p *cinderProvisioner, volumeID string) error { - return volumeactions.Unreserve(p.volumeService, volumeID).ExtractErr() +// UnreserveCinderVolume marks a cinder volume in 'Attaching' state as 'Available'. +func UnreserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { + return volumeactions.Unreserve(vs, volumeID).ExtractErr() } -func deleteCinderVolume(p *cinderProvisioner, volumeID string) error { - err := volumes_v2.Delete(p.volumeService, volumeID).ExtractErr() +// DeleteCinderVolume removes a volume from cinder which will cause it to be +// deleted on the storage server. 
+func DeleteCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { + err := volumes_v2.Delete(vs, volumeID).ExtractErr() if err != nil { glog.Errorf("Cannot delete volume %s: %v", volumeID, err) } return err } - -type volumeMapper interface { - BuildPVSource(ctx provisionCtx) (*v1.PersistentVolumeSource, error) - AuthSetup(ctx provisionCtx) error - AuthTeardown(ctx deleteCtx) error -} - -func newVolumeMapperFromConnection(conn volumeConnection) (volumeMapper, error) { - switch conn.DriverVolumeType { - default: - msg := fmt.Sprintf("Unsupported persistent volume type: %s", conn.DriverVolumeType) - return nil, errors.New(msg) - case iscsiType: - return new(iscsiMapper), nil - case rbdType: - return new(rbdMapper), nil - } -} - -func newVolumeMapperFromPV(ctx deleteCtx) (volumeMapper, error) { - if ctx.pv.Spec.ISCSI != nil { - return new(iscsiMapper), nil - } else if ctx.pv.Spec.RBD != nil { - return new(rbdMapper), nil - } else { - return nil, errors.New("Unsupported persistent volume source") - } -} - -func buildPV(m volumeMapper, ctx provisionCtx, volumeID string) (*v1.PersistentVolume, error) { - pvSource, err := m.BuildPVSource(ctx) - if err != nil { - glog.Errorf("Failed to build PV Source element: %v", err) - return nil, err - } - - pv := &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: ctx.options.PVName, - Namespace: ctx.options.PVC.Namespace, - Annotations: map[string]string{ - provisionerIDAnn: ctx.p.identity, - cinderVolumeID: volumeID, - }, - }, - Spec: v1.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: ctx.options.PersistentVolumeReclaimPolicy, - AccessModes: ctx.options.PVC.Spec.AccessModes, - Capacity: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): ctx.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)], - }, - PersistentVolumeSource: *pvSource, - }, - } - return pv, nil -} diff --git a/openstack/standalone-cinder/openstack.go b/openstack/standalone-cinder/pkg/volumeservice/connection.go similarity index 96% rename from openstack/standalone-cinder/openstack.go rename to openstack/standalone-cinder/pkg/volumeservice/connection.go index a363dc64c58..e90f1be3406 100644 --- a/openstack/standalone-cinder/openstack.go +++ b/openstack/standalone-cinder/pkg/volumeservice/connection.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package volumeservice import ( "crypto/tls" @@ -173,7 +173,9 @@ func getKeystoneVolumeService(cfg cinderConfig) (*gophercloud.ServiceClient, err return volumeService, nil } -func getVolumeService(configFilePath string) (*gophercloud.ServiceClient, error) { +// GetVolumeService returns a connected cinder client based on configuration +// specified in configFilePath or the environment. +func GetVolumeService(configFilePath string) (*gophercloud.ServiceClient, error) { config, err := getConfig(configFilePath) if err != nil { return nil, err From 69291ca69d1584ca211be69d74787facb80090fb Mon Sep 17 00:00:00 2001 From: Adam Litke Date: Thu, 21 Sep 2017 12:47:41 -0400 Subject: [PATCH 09/68] Remove ProvisionCtx and DeleteCtx These two structures just bundle function arguments and make it harder to individually test components. Get rid of them and just pass the required parameters directly. 
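To see what this buys in practice: with the ctx structs gone, a unit test no longer has to assemble a full provisionCtx containing a live cinderProvisioner; it can pass only the values the function reads. The test below is a hypothetical sketch written against the new getChapSecretName signature shown in the diff that follows — it is not part of this series, and the expected value simply follows from the PVName plus the "-secret" suffix.

```go
package provisioner

import (
	"testing"

	"github.com/kubernetes-incubator/external-storage/lib/controller"
	"github.com/kubernetes-incubator/external-storage/openstack/standalone-cinder/pkg/volumeservice"
)

// With the ctx structs removed, getChapSecretName needs only the connection
// info and the volume options, so no cinderProvisioner or clientset is built.
func TestGetChapSecretName(t *testing.T) {
	conn := volumeservice.VolumeConnection{}
	conn.Data.AuthMethod = "CHAP"
	options := controller.VolumeOptions{PVName: "pv-1"}

	if got := getChapSecretName(conn, options); got != "pv-1-secret" {
		t.Errorf("expected %q, got %q", "pv-1-secret", got)
	}
}
```
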
Signed-off-by: Adam Litke --- .../pkg/provisioner/iscsi.go | 56 ++++++++++--------- .../pkg/provisioner/mapper.go | 29 +++++----- .../pkg/provisioner/provisioner.go | 21 ++----- .../standalone-cinder/pkg/provisioner/rbd.go | 19 ++++--- 4 files changed, 60 insertions(+), 65 deletions(-) diff --git a/openstack/standalone-cinder/pkg/provisioner/iscsi.go b/openstack/standalone-cinder/pkg/provisioner/iscsi.go index 902d0bd92df..93199459969 100644 --- a/openstack/standalone-cinder/pkg/provisioner/iscsi.go +++ b/openstack/standalone-cinder/pkg/provisioner/iscsi.go @@ -18,6 +18,8 @@ package provisioner import ( "github.com/golang/glog" + "github.com/kubernetes-incubator/external-storage/lib/controller" + "github.com/kubernetes-incubator/external-storage/openstack/standalone-cinder/pkg/volumeservice" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -28,24 +30,24 @@ type iscsiMapper struct { volumeMapper } -func getChapSecretName(ctx provisionCtx) string { - if ctx.Connection.Data.AuthMethod == "CHAP" { - return ctx.Options.PVName + "-secret" +func getChapSecretName(connection volumeservice.VolumeConnection, options controller.VolumeOptions) string { + if connection.Data.AuthMethod == "CHAP" { + return options.PVName + "-secret" } return "" } -func (m *iscsiMapper) BuildPVSource(ctx provisionCtx) (*v1.PersistentVolumeSource, error) { +func (m *iscsiMapper) BuildPVSource(conn volumeservice.VolumeConnection, options controller.VolumeOptions) (*v1.PersistentVolumeSource, error) { ret := &v1.PersistentVolumeSource{ ISCSI: &v1.ISCSIVolumeSource{ // TODO: Need some way to specify the initiator name - TargetPortal: ctx.Connection.Data.TargetPortal, - IQN: ctx.Connection.Data.TargetIqn, - Lun: ctx.Connection.Data.TargetLun, + TargetPortal: conn.Data.TargetPortal, + IQN: conn.Data.TargetIqn, + Lun: conn.Data.TargetLun, SessionCHAPAuth: false, }, } - secretName := getChapSecretName(ctx) + secretName := getChapSecretName(conn, options) if secretName != "" { ret.ISCSI.SessionCHAPAuth = true ret.ISCSI.SecretRef = &v1.LocalObjectReference{ @@ -55,9 +57,19 @@ func (m *iscsiMapper) BuildPVSource(ctx provisionCtx) (*v1.PersistentVolumeSourc return ret, nil } -func (m *iscsiMapper) AuthSetup(ctx provisionCtx) error { +func createSecret(p *cinderProvisioner, ns string, secret *v1.Secret) error { + _, err := p.Client.CoreV1().Secrets(ns).Create(secret) + if err != nil { + glog.Errorf("Failed to create chap secret in namespace %s: %v", ns, err) + return err + } + glog.V(3).Infof("Secret %s created", secret.ObjectMeta.Name) + return nil +} + +func (m *iscsiMapper) AuthSetup(p *cinderProvisioner, options controller.VolumeOptions, conn volumeservice.VolumeConnection) error { // Create a secret for the CHAP credentials - secretName := getChapSecretName(ctx) + secretName := getChapSecretName(conn, options) if secretName == "" { glog.V(3).Info("No CHAP authentication secret necessary") return nil @@ -68,30 +80,24 @@ func (m *iscsiMapper) AuthSetup(ctx provisionCtx) error { }, Type: "kubernetes.io/iscsi-chap", Data: map[string][]byte{ - "node.session.auth.username": []byte(ctx.Connection.Data.AuthUsername), - "node.session.auth.password": []byte(ctx.Connection.Data.AuthPassword), + "node.session.auth.username": []byte(conn.Data.AuthUsername), + "node.session.auth.password": []byte(conn.Data.AuthPassword), }, } - namespace := ctx.Options.PVC.Namespace - _, err := ctx.P.Client.CoreV1().Secrets(namespace).Create(secret) - if err != nil { - glog.Errorf("Failed to create chap secret in namespace %s: %v", 
namespace, err) - return err - } - glog.V(3).Infof("Secret %s created", secretName) - return nil + namespace := options.PVC.Namespace + return createSecret(p, namespace, secret) } -func (m *iscsiMapper) AuthTeardown(ctx deleteCtx) error { +func (m *iscsiMapper) AuthTeardown(p *cinderProvisioner, pv *v1.PersistentVolume) error { // Delete the CHAP credentials - if ctx.PV.Spec.ISCSI.SecretRef == nil { + if pv.Spec.ISCSI.SecretRef == nil { glog.V(3).Info("No associated secret to delete") return nil } - secretName := ctx.PV.Spec.ISCSI.SecretRef.Name - secretNamespace := ctx.PV.Spec.ClaimRef.Namespace - err := ctx.P.Client.CoreV1().Secrets(secretNamespace).Delete(secretName, nil) + secretName := pv.Spec.ISCSI.SecretRef.Name + secretNamespace := pv.Spec.ClaimRef.Namespace + err := p.Client.CoreV1().Secrets(secretNamespace).Delete(secretName, nil) if err != nil { glog.Errorf("Failed to remove secret %s from namespace %s: %v", secretName, secretNamespace, err) return err diff --git a/openstack/standalone-cinder/pkg/provisioner/mapper.go b/openstack/standalone-cinder/pkg/provisioner/mapper.go index 0c0436e8a43..93a122eb66c 100644 --- a/openstack/standalone-cinder/pkg/provisioner/mapper.go +++ b/openstack/standalone-cinder/pkg/provisioner/mapper.go @@ -21,15 +21,16 @@ import ( "fmt" "github.com/golang/glog" + "github.com/kubernetes-incubator/external-storage/lib/controller" "github.com/kubernetes-incubator/external-storage/openstack/standalone-cinder/pkg/volumeservice" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type volumeMapper interface { - BuildPVSource(ctx provisionCtx) (*v1.PersistentVolumeSource, error) - AuthSetup(ctx provisionCtx) error - AuthTeardown(ctx deleteCtx) error + BuildPVSource(conn volumeservice.VolumeConnection, options controller.VolumeOptions) (*v1.PersistentVolumeSource, error) + AuthSetup(p *cinderProvisioner, options controller.VolumeOptions, conn volumeservice.VolumeConnection) error + AuthTeardown(p *cinderProvisioner, pv *v1.PersistentVolume) error } func newVolumeMapperFromConnection(conn volumeservice.VolumeConnection) (volumeMapper, error) { @@ -44,18 +45,18 @@ func newVolumeMapperFromConnection(conn volumeservice.VolumeConnection) (volumeM } } -func newVolumeMapperFromPV(ctx deleteCtx) (volumeMapper, error) { - if ctx.PV.Spec.ISCSI != nil { +func newVolumeMapperFromPV(pv *v1.PersistentVolume) (volumeMapper, error) { + if pv.Spec.ISCSI != nil { return new(iscsiMapper), nil - } else if ctx.PV.Spec.RBD != nil { + } else if pv.Spec.RBD != nil { return new(rbdMapper), nil } else { return nil, errors.New("Unsupported persistent volume source") } } -func buildPV(m volumeMapper, ctx provisionCtx, volumeID string) (*v1.PersistentVolume, error) { - pvSource, err := m.BuildPVSource(ctx) +func buildPV(m volumeMapper, p *cinderProvisioner, options controller.VolumeOptions, conn volumeservice.VolumeConnection, volumeID string) (*v1.PersistentVolume, error) { + pvSource, err := m.BuildPVSource(conn, options) if err != nil { glog.Errorf("Failed to build PV Source element: %v", err) return nil, err @@ -63,18 +64,18 @@ func buildPV(m volumeMapper, ctx provisionCtx, volumeID string) (*v1.PersistentV pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ - Name: ctx.Options.PVName, - Namespace: ctx.Options.PVC.Namespace, + Name: options.PVName, + Namespace: options.PVC.Namespace, Annotations: map[string]string{ - ProvisionerIDAnn: ctx.P.Identity, + ProvisionerIDAnn: p.Identity, CinderVolumeID: volumeID, }, }, Spec: v1.PersistentVolumeSpec{ - 
PersistentVolumeReclaimPolicy: ctx.Options.PersistentVolumeReclaimPolicy, - AccessModes: ctx.Options.PVC.Spec.AccessModes, + PersistentVolumeReclaimPolicy: options.PersistentVolumeReclaimPolicy, + AccessModes: options.PVC.Spec.AccessModes, Capacity: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): ctx.Options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)], + v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)], }, PersistentVolumeSource: *pvSource, }, diff --git a/openstack/standalone-cinder/pkg/provisioner/provisioner.go b/openstack/standalone-cinder/pkg/provisioner/provisioner.go index ad4878afb09..f9879feb180 100644 --- a/openstack/standalone-cinder/pkg/provisioner/provisioner.go +++ b/openstack/standalone-cinder/pkg/provisioner/provisioner.go @@ -66,17 +66,6 @@ func NewCinderProvisioner(client kubernetes.Interface, id, configFilePath string }, nil } -type provisionCtx struct { - P *cinderProvisioner - Options controller.VolumeOptions - Connection volumeservice.VolumeConnection -} - -type deleteCtx struct { - P *cinderProvisioner - PV *v1.PersistentVolume -} - // Provision creates a storage asset and returns a PV object representing it. func (p *cinderProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error) { if options.PVC.Spec.Selector != nil { @@ -116,15 +105,14 @@ func (p *cinderProvisioner) Provision(options controller.VolumeOptions) (*v1.Per return nil, err } - ctx := provisionCtx{p, options, connection} - err = mapper.AuthSetup(ctx) + err = mapper.AuthSetup(p, options, connection) if err != nil { // TODO: Create placeholder PV? glog.Errorf("Failed to prepare volume auth: %v", err) return nil, err } - pv, err := buildPV(mapper, ctx, volumeID) + pv, err := buildPV(mapper, p, options, connection, volumeID) if err != nil { // TODO: Create placeholder PV? 
glog.Errorf("Failed to build PV: %v", err) @@ -154,13 +142,12 @@ func (p *cinderProvisioner) Delete(pv *v1.PersistentVolume) error { return errors.New(CinderVolumeID + " annotation not found on PV") } - ctx := deleteCtx{p, pv} - mapper, err := newVolumeMapperFromPV(ctx) + mapper, err := newVolumeMapperFromPV(pv) if err != nil { return err } - mapper.AuthTeardown(ctx) + mapper.AuthTeardown(p, pv) err = volumeservice.DisconnectCinderVolume(p.VolumeService, volumeID) if err != nil { diff --git a/openstack/standalone-cinder/pkg/provisioner/rbd.go b/openstack/standalone-cinder/pkg/provisioner/rbd.go index 82d35e605b3..8020f52f3b5 100644 --- a/openstack/standalone-cinder/pkg/provisioner/rbd.go +++ b/openstack/standalone-cinder/pkg/provisioner/rbd.go @@ -22,6 +22,7 @@ import ( "strings" "github.com/golang/glog" + "github.com/kubernetes-incubator/external-storage/lib/controller" "github.com/kubernetes-incubator/external-storage/openstack/standalone-cinder/pkg/volumeservice" "k8s.io/api/core/v1" ) @@ -44,16 +45,16 @@ func getMonitors(conn volumeservice.VolumeConnection) []string { return mons } -func getRbdSecretName(ctx provisionCtx) string { - return fmt.Sprintf("%s-cephx-secret", *ctx.Options.PVC.Spec.StorageClassName) +func getRbdSecretName(pvc *v1.PersistentVolumeClaim) string { + return fmt.Sprintf("%s-cephx-secret", *pvc.Spec.StorageClassName) } -func (m *rbdMapper) BuildPVSource(ctx provisionCtx) (*v1.PersistentVolumeSource, error) { - mons := getMonitors(ctx.Connection) +func (m *rbdMapper) BuildPVSource(conn volumeservice.VolumeConnection, options controller.VolumeOptions) (*v1.PersistentVolumeSource, error) { + mons := getMonitors(conn) if mons == nil { return nil, errors.New("No monitors could be parsed from connection info") } - splitName := strings.SplitN(ctx.Connection.Data.Name, "/", 2) + splitName := strings.SplitN(conn.Data.Name, "/", 2) if len(splitName) != 2 { return nil, errors.New("Field 'name' cannot be split into pool and image") } @@ -63,18 +64,18 @@ func (m *rbdMapper) BuildPVSource(ctx provisionCtx) (*v1.PersistentVolumeSource, CephMonitors: mons, RBDPool: splitName[0], RBDImage: splitName[1], - RadosUser: ctx.Connection.Data.AuthUsername, + RadosUser: conn.Data.AuthUsername, SecretRef: &v1.LocalObjectReference{ - Name: getRbdSecretName(ctx), + Name: getRbdSecretName(options.PVC), }, }, }, nil } -func (m *rbdMapper) AuthSetup(ctx provisionCtx) error { +func (m *rbdMapper) AuthSetup(p *cinderProvisioner, options controller.VolumeOptions, conn volumeservice.VolumeConnection) error { return nil } -func (m *rbdMapper) AuthTeardown(ctx deleteCtx) error { +func (m *rbdMapper) AuthTeardown(p *cinderProvisioner, pv *v1.PersistentVolume) error { return nil } From 8533355ac552a61e3b47c6cb5053648162605e92 Mon Sep 17 00:00:00 2001 From: Adam Litke Date: Thu, 21 Sep 2017 20:47:15 -0400 Subject: [PATCH 10/68] Introduce interface brokers In order to facilitate testing and separation of concerns, add a few broker interfaces which can be swapped at test time for mock implementations. clusterBroker contains methods that interact with the k8s API. 
- createSecret - deleteSecret mapperBroker contains methods that interact with volumeMappers - newVolumeMapperFromConnection - newVolumeMapperFromPV - buildPV volumeServiceBroker contains methods that interact with cinder - CreateCinderVolume - WaitForAvailableCinderVolume - ReserveCinderVolume - ConnectCinderVolume - DisconnectCinderVolume - UnreserveCinderVolume - DeleteCinderVolume Signed-off-by: Adam Litke --- .../pkg/provisioner/clusterbroker.go | 51 +++++++++++++++ .../pkg/provisioner/iscsi.go | 21 +----- .../pkg/provisioner/mapper.go | 4 +- .../pkg/provisioner/mapperbroker.go | 45 +++++++++++++ .../pkg/provisioner/provisioner.go | 25 ++++--- .../pkg/provisioner/vsbroker.go | 65 +++++++++++++++++++ 6 files changed, 181 insertions(+), 30 deletions(-) create mode 100644 openstack/standalone-cinder/pkg/provisioner/clusterbroker.go create mode 100644 openstack/standalone-cinder/pkg/provisioner/mapperbroker.go create mode 100644 openstack/standalone-cinder/pkg/provisioner/vsbroker.go diff --git a/openstack/standalone-cinder/pkg/provisioner/clusterbroker.go b/openstack/standalone-cinder/pkg/provisioner/clusterbroker.go new file mode 100644 index 00000000000..46bbdf38a97 --- /dev/null +++ b/openstack/standalone-cinder/pkg/provisioner/clusterbroker.go @@ -0,0 +1,51 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package provisioner + +import ( + "github.com/golang/glog" + "k8s.io/api/core/v1" +) + +type clusterBroker interface { + createSecret(p *cinderProvisioner, ns string, secret *v1.Secret) error + deleteSecret(p *cinderProvisioner, ns string, secretName string) error +} + +type k8sClusterBroker struct { + clusterBroker +} + +func (k8sClusterBroker) createSecret(p *cinderProvisioner, ns string, secret *v1.Secret) error { + _, err := p.Client.CoreV1().Secrets(ns).Create(secret) + if err != nil { + glog.Errorf("Failed to create chap secret in namespace %s: %v", ns, err) + return err + } + glog.V(3).Infof("Secret %s created", secret.ObjectMeta.Name) + return nil +} + +func (*k8sClusterBroker) deleteSecret(p *cinderProvisioner, ns string, secretName string) error { + err := p.Client.CoreV1().Secrets(ns).Delete(secretName, nil) + if err != nil { + glog.Errorf("Failed to remove secret %s from namespace %s: %v", secretName, ns, err) + return err + } + glog.V(3).Infof("Successfully deleted secret %s", secretName) + return nil +} diff --git a/openstack/standalone-cinder/pkg/provisioner/iscsi.go b/openstack/standalone-cinder/pkg/provisioner/iscsi.go index 93199459969..15498a68d2a 100644 --- a/openstack/standalone-cinder/pkg/provisioner/iscsi.go +++ b/openstack/standalone-cinder/pkg/provisioner/iscsi.go @@ -28,6 +28,7 @@ const iscsiType = "iscsi" type iscsiMapper struct { volumeMapper + cb clusterBroker } func getChapSecretName(connection volumeservice.VolumeConnection, options controller.VolumeOptions) string { @@ -57,16 +58,6 @@ func (m *iscsiMapper) BuildPVSource(conn volumeservice.VolumeConnection, options return ret, nil } -func createSecret(p *cinderProvisioner, ns string, secret *v1.Secret) error { - _, err := p.Client.CoreV1().Secrets(ns).Create(secret) - if err != nil { - glog.Errorf("Failed to create chap secret in namespace %s: %v", ns, err) - return err - } - glog.V(3).Infof("Secret %s created", secret.ObjectMeta.Name) - return nil -} - func (m *iscsiMapper) AuthSetup(p *cinderProvisioner, options controller.VolumeOptions, conn volumeservice.VolumeConnection) error { // Create a secret for the CHAP credentials secretName := getChapSecretName(conn, options) @@ -85,7 +76,7 @@ func (m *iscsiMapper) AuthSetup(p *cinderProvisioner, options controller.VolumeO }, } namespace := options.PVC.Namespace - return createSecret(p, namespace, secret) + return m.cb.createSecret(p, namespace, secret) } func (m *iscsiMapper) AuthTeardown(p *cinderProvisioner, pv *v1.PersistentVolume) error { @@ -97,11 +88,5 @@ func (m *iscsiMapper) AuthTeardown(p *cinderProvisioner, pv *v1.PersistentVolume secretName := pv.Spec.ISCSI.SecretRef.Name secretNamespace := pv.Spec.ClaimRef.Namespace - err := p.Client.CoreV1().Secrets(secretNamespace).Delete(secretName, nil) - if err != nil { - glog.Errorf("Failed to remove secret %s from namespace %s: %v", secretName, secretNamespace, err) - return err - } - glog.V(3).Infof("Successfully deleted secret %s", secretName) - return nil + return m.cb.deleteSecret(p, secretNamespace, secretName) } diff --git a/openstack/standalone-cinder/pkg/provisioner/mapper.go b/openstack/standalone-cinder/pkg/provisioner/mapper.go index 93a122eb66c..b5aa4467cb8 100644 --- a/openstack/standalone-cinder/pkg/provisioner/mapper.go +++ b/openstack/standalone-cinder/pkg/provisioner/mapper.go @@ -39,7 +39,7 @@ func newVolumeMapperFromConnection(conn volumeservice.VolumeConnection) (volumeM msg := fmt.Sprintf("Unsupported persistent volume type: %s", conn.DriverVolumeType) return nil, errors.New(msg) 
case iscsiType: - return new(iscsiMapper), nil + return &iscsiMapper{cb: &k8sClusterBroker{}}, nil case rbdType: return new(rbdMapper), nil } @@ -47,7 +47,7 @@ func newVolumeMapperFromConnection(conn volumeservice.VolumeConnection) (volumeM func newVolumeMapperFromPV(pv *v1.PersistentVolume) (volumeMapper, error) { if pv.Spec.ISCSI != nil { - return new(iscsiMapper), nil + return &iscsiMapper{cb: &k8sClusterBroker{}}, nil } else if pv.Spec.RBD != nil { return new(rbdMapper), nil } else { diff --git a/openstack/standalone-cinder/pkg/provisioner/mapperbroker.go b/openstack/standalone-cinder/pkg/provisioner/mapperbroker.go new file mode 100644 index 00000000000..0a2551700c8 --- /dev/null +++ b/openstack/standalone-cinder/pkg/provisioner/mapperbroker.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package provisioner + +import ( + "github.com/kubernetes-incubator/external-storage/lib/controller" + "github.com/kubernetes-incubator/external-storage/openstack/standalone-cinder/pkg/volumeservice" + "k8s.io/api/core/v1" +) + +type mapperBroker interface { + newVolumeMapperFromConnection(conn volumeservice.VolumeConnection) (volumeMapper, error) + newVolumeMapperFromPV(pv *v1.PersistentVolume) (volumeMapper, error) + buildPV(m volumeMapper, p *cinderProvisioner, options controller.VolumeOptions, conn volumeservice.VolumeConnection, volumeID string) (*v1.PersistentVolume, error) +} + +type volumeMapperBroker struct { + mapperBroker +} + +func (mb *volumeMapperBroker) newVolumeMapperFromConnection(conn volumeservice.VolumeConnection) (volumeMapper, error) { + return newVolumeMapperFromConnection(conn) +} + +func (mb *volumeMapperBroker) newVolumeMapperFromPV(pv *v1.PersistentVolume) (volumeMapper, error) { + return newVolumeMapperFromPV(pv) +} + +func (mb *volumeMapperBroker) buildPV(m volumeMapper, p *cinderProvisioner, options controller.VolumeOptions, conn volumeservice.VolumeConnection, volumeID string) (*v1.PersistentVolume, error) { + return buildPV(m, p, options, conn, volumeID) +} diff --git a/openstack/standalone-cinder/pkg/provisioner/provisioner.go b/openstack/standalone-cinder/pkg/provisioner/provisioner.go index f9879feb180..42fff731cd9 100644 --- a/openstack/standalone-cinder/pkg/provisioner/provisioner.go +++ b/openstack/standalone-cinder/pkg/provisioner/provisioner.go @@ -48,6 +48,9 @@ type cinderProvisioner struct { // Identity of this cinderProvisioner, generated. Used to identify "this" // provisioner's PVs. 
Identity string + + vsb volumeServiceBroker + mb mapperBroker } // NewCinderProvisioner returns a Provisioner that creates volumes using a @@ -63,6 +66,8 @@ func NewCinderProvisioner(client kubernetes.Interface, id, configFilePath string VolumeService: volumeService, Client: client, Identity: id, + vsb: &gophercloudBroker{}, + mb: &volumeMapperBroker{}, }, nil } @@ -72,33 +77,33 @@ func (p *cinderProvisioner) Provision(options controller.VolumeOptions) (*v1.Per return nil, fmt.Errorf("claim Selector is not supported") } - volumeID, err := volumeservice.CreateCinderVolume(p.VolumeService, options) + volumeID, err := p.vsb.CreateCinderVolume(p.VolumeService, options) if err != nil { glog.Errorf("Failed to create volume") return nil, err } - err = volumeservice.WaitForAvailableCinderVolume(p.VolumeService, volumeID) + err = p.vsb.WaitForAvailableCinderVolume(p.VolumeService, volumeID) if err != nil { glog.Errorf("Volume did not become available") return nil, err } - err = volumeservice.ReserveCinderVolume(p.VolumeService, volumeID) + err = p.vsb.ReserveCinderVolume(p.VolumeService, volumeID) if err != nil { // TODO: Create placeholder PV? glog.Errorf("Failed to reserve volume: %v", err) return nil, err } - connection, err := volumeservice.ConnectCinderVolume(p.VolumeService, volumeID) + connection, err := p.vsb.ConnectCinderVolume(p.VolumeService, volumeID) if err != nil { // TODO: Create placeholder PV? glog.Errorf("Failed to connect volume: %v", err) return nil, err } - mapper, err := newVolumeMapperFromConnection(connection) + mapper, err := p.mb.newVolumeMapperFromConnection(connection) if err != nil { // TODO: Create placeholder PV? glog.Errorf("Unable to create volume mapper: %f", err) @@ -112,7 +117,7 @@ func (p *cinderProvisioner) Provision(options controller.VolumeOptions) (*v1.Per return nil, err } - pv, err := buildPV(mapper, p, options, connection, volumeID) + pv, err := p.mb.buildPV(mapper, p, options, connection, volumeID) if err != nil { // TODO: Create placeholder PV? glog.Errorf("Failed to build PV: %v", err) @@ -142,26 +147,26 @@ func (p *cinderProvisioner) Delete(pv *v1.PersistentVolume) error { return errors.New(CinderVolumeID + " annotation not found on PV") } - mapper, err := newVolumeMapperFromPV(pv) + mapper, err := p.mb.newVolumeMapperFromPV(pv) if err != nil { return err } mapper.AuthTeardown(p, pv) - err = volumeservice.DisconnectCinderVolume(p.VolumeService, volumeID) + err = p.vsb.DisconnectCinderVolume(p.VolumeService, volumeID) if err != nil { return err } - err = volumeservice.UnreserveCinderVolume(p.VolumeService, volumeID) + err = p.vsb.UnreserveCinderVolume(p.VolumeService, volumeID) if err != nil { // TODO: Create placeholder PV? glog.Errorf("Failed to unreserve volume: %v", err) return err } - err = volumeservice.DeleteCinderVolume(p.VolumeService, volumeID) + err = p.vsb.DeleteCinderVolume(p.VolumeService, volumeID) if err != nil { return err } diff --git a/openstack/standalone-cinder/pkg/provisioner/vsbroker.go b/openstack/standalone-cinder/pkg/provisioner/vsbroker.go new file mode 100644 index 00000000000..005c46f8262 --- /dev/null +++ b/openstack/standalone-cinder/pkg/provisioner/vsbroker.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package provisioner + +import ( + "github.com/gophercloud/gophercloud" + "github.com/kubernetes-incubator/external-storage/lib/controller" + "github.com/kubernetes-incubator/external-storage/openstack/standalone-cinder/pkg/volumeservice" +) + +type volumeServiceBroker interface { + CreateCinderVolume(vs *gophercloud.ServiceClient, options controller.VolumeOptions) (string, error) + WaitForAvailableCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error + ReserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error + ConnectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) (volumeservice.VolumeConnection, error) + DisconnectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error + UnreserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error + DeleteCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error +} + +type gophercloudBroker struct { + volumeServiceBroker +} + +func (vsb *gophercloudBroker) CreateCinderVolume(vs *gophercloud.ServiceClient, options controller.VolumeOptions) (string, error) { + return volumeservice.CreateCinderVolume(vs, options) +} + +func (vsb *gophercloudBroker) WaitForAvailableCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { + return volumeservice.WaitForAvailableCinderVolume(vs, volumeID) +} + +func (vsb *gophercloudBroker) ReserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { + return volumeservice.ReserveCinderVolume(vs, volumeID) +} + +func (vsb *gophercloudBroker) ConnectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) (volumeservice.VolumeConnection, error) { + return volumeservice.ConnectCinderVolume(vs, volumeID) +} + +func (vsb *gophercloudBroker) DisconnectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { + return volumeservice.DisconnectCinderVolume(vs, volumeID) +} + +func (vsb *gophercloudBroker) UnreserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { + return volumeservice.UnreserveCinderVolume(vs, volumeID) +} + +func (vsb *gophercloudBroker) DeleteCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { + return volumeservice.DeleteCinderVolume(vs, volumeID) +} From 7ee600a8db65cc387e180fbfa55fd452ed1343f3 Mon Sep 17 00:00:00 2001 From: Adam Litke Date: Tue, 26 Sep 2017 15:33:04 -0400 Subject: [PATCH 11/68] Add tests for the provisioner package Signed-off-by: Adam Litke --- .../pkg/provisioner/iscsi_test.go | 216 ++++++++++++ .../pkg/provisioner/mapper_test.go | 191 +++++++++++ .../pkg/provisioner/provisioner_suite_test.go | 29 ++ .../pkg/provisioner/provisioner_test.go | 317 ++++++++++++++++++ .../pkg/provisioner/rbd_test.go | 170 ++++++++++ .../pkg/provisioner/testutils_test.go | 129 +++++++ 6 files changed, 1052 insertions(+) create mode 100644 openstack/standalone-cinder/pkg/provisioner/iscsi_test.go create mode 100644 openstack/standalone-cinder/pkg/provisioner/mapper_test.go create mode 100644 openstack/standalone-cinder/pkg/provisioner/provisioner_suite_test.go create mode 100644 openstack/standalone-cinder/pkg/provisioner/provisioner_test.go create mode 100644 
openstack/standalone-cinder/pkg/provisioner/rbd_test.go create mode 100644 openstack/standalone-cinder/pkg/provisioner/testutils_test.go diff --git a/openstack/standalone-cinder/pkg/provisioner/iscsi_test.go b/openstack/standalone-cinder/pkg/provisioner/iscsi_test.go new file mode 100644 index 00000000000..0ee9d557dcc --- /dev/null +++ b/openstack/standalone-cinder/pkg/provisioner/iscsi_test.go @@ -0,0 +1,216 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package provisioner + +import ( + "github.com/kubernetes-incubator/external-storage/lib/controller" + "github.com/kubernetes-incubator/external-storage/openstack/standalone-cinder/pkg/volumeservice" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/api/core/v1" +) + +var _ = Describe("Iscsi Mapper", func() { + Describe("the CHAP secret name", func() { + var ( + secretName string + conn volumeservice.VolumeConnection + ) + options := createVolumeOptions() + + JustBeforeEach(func() { + secretName = getChapSecretName(conn, options) + }) + + Context("when the authentication method is CHAP", func() { + BeforeEach(func() { + conn.Data.AuthMethod = "CHAP" + }) + It("should be based on the PV name", func() { + Expect(secretName).To(Equal("testpv-secret")) + }) + }) + + Context("when the authentication method is not CHAP", func() { + BeforeEach(func() { + conn.Data.AuthMethod = "not chap" + }) + It("should be blank", func() { + Expect(secretName).To(Equal("")) + }) + }) + }) + + Describe("the persistent volume source", func() { + var ( + mapper iscsiMapper + err error + source *v1.PersistentVolumeSource + secretName string + conn volumeservice.VolumeConnection + ) + options := controller.VolumeOptions{ + PVName: "myPV", + } + + BeforeEach(func() { + conn = createIscsiConnectionInfo() + mapper = iscsiMapper{} + }) + + JustBeforeEach(func() { + secretName = getChapSecretName(conn, options) + source, err = mapper.BuildPVSource(conn, options) + }) + + It("should be populated with iscsi connection info", func() { + Expect(source.ISCSI.TargetPortal).To(Equal("portal")) + Expect(source.ISCSI.IQN).To(Equal("iqn")) + Expect(source.ISCSI.Lun).To(Equal(int32(3))) + Expect(source.ISCSI.SessionCHAPAuth).To(BeFalse()) + }) + + Context("when CHAP authentication is enabled", func() { + BeforeEach(func() { + conn.Data.AuthMethod = "CHAP" + }) + It("should contain a reference to a CHAP secret", func() { + Expect(source.ISCSI.SessionCHAPAuth).To(BeTrue()) + Expect(source.ISCSI.SecretRef).To(Not(BeNil())) + Expect(source.ISCSI.SecretRef.Name).To(Equal(secretName)) + }) + }) + }) + + Describe("Authorization setup", func() { + var ( + cb *fakeClusterBroker + mapper iscsiMapper + conn volumeservice.VolumeConnection + options controller.VolumeOptions + err error + ) + + BeforeEach(func() { + cb = &fakeClusterBroker{} + mapper = iscsiMapper{cb: cb} + conn = createIscsiConnectionInfo() + options = createVolumeOptions() + }) + + JustBeforeEach(func() { + err = mapper.AuthSetup(&cinderProvisioner{}, options, conn) + }) + + 
Context("when the connection supplies CHAP credentials", func() { + BeforeEach(func() { + conn.Data.AuthMethod = "CHAP" + conn.Data.AuthUsername = "user" + conn.Data.AuthPassword = "pass" + }) + + It("should create a CHAP secret", func() { + Expect(cb.CreatedSecret).To(Not(BeNil())) + Expect(cb.CreatedSecret.Type).To(Equal(v1.SecretType("kubernetes.io/iscsi-chap"))) + Expect(cb.CreatedSecret.Name).To(Equal("testpv-secret")) + user := string(cb.CreatedSecret.Data["node.session.auth.username"][:]) + pass := string(cb.CreatedSecret.Data["node.session.auth.password"][:]) + Expect(user).To(Equal("user")) + Expect(pass).To(Equal("pass")) + }) + + It("should target the namespace where the PVC resides", func() { + Expect(cb.Namespace).To(Equal(options.PVC.Namespace)) + }) + }) + + Context("when the connection does not require CHAP authentication", func() { + It("should not create a CHAP secret", func() { + Expect(cb.CreatedSecret).To(BeNil()) + }) + }) + }) + + Describe("Authorization Teardown", func() { + var ( + cb *fakeClusterBroker + mapper iscsiMapper + pv *v1.PersistentVolume + err error + ) + + BeforeEach(func() { + pv = createPersistentVolume() + pv.Spec.PersistentVolumeSource.ISCSI = &v1.ISCSIVolumeSource{} + }) + + JustBeforeEach(func() { + cb = &fakeClusterBroker{} + mapper = iscsiMapper{cb: cb} + err = mapper.AuthTeardown(&cinderProvisioner{}, pv) + }) + + Context("when the PV contains a secret reference", func() { + BeforeEach(func() { + pv.Spec.ISCSI.SecretRef = &v1.LocalObjectReference{ + Name: "secretName", + } + }) + + It("should delete the secret", func() { + Expect(cb.DeletedSecret).To(Equal("secretName")) + Expect(cb.Namespace).To(Equal("testNs")) + }) + }) + + Context("when the PV does not contain a secret reference", func() { + It("should not delete any secret", func() { + Expect(cb.DeletedSecret).To(BeEmpty()) + }) + }) + }) +}) + +func createIscsiConnectionInfo() volumeservice.VolumeConnection { + return volumeservice.VolumeConnection{ + DriverVolumeType: iscsiType, + Data: volumeservice.VolumeConnectionDetails{ + TargetPortal: "portal", + TargetIqn: "iqn", + TargetLun: 3, + }, + } +} + +type fakeClusterBroker struct { + clusterBroker + CreatedSecret *v1.Secret + DeletedSecret string + Namespace string +} + +func (cb *fakeClusterBroker) createSecret(p *cinderProvisioner, ns string, secret *v1.Secret) error { + cb.CreatedSecret = secret + cb.Namespace = ns + return nil +} + +func (cb *fakeClusterBroker) deleteSecret(p *cinderProvisioner, ns string, secretName string) error { + cb.DeletedSecret = secretName + cb.Namespace = ns + return nil +} diff --git a/openstack/standalone-cinder/pkg/provisioner/mapper_test.go b/openstack/standalone-cinder/pkg/provisioner/mapper_test.go new file mode 100644 index 00000000000..02650c95242 --- /dev/null +++ b/openstack/standalone-cinder/pkg/provisioner/mapper_test.go @@ -0,0 +1,191 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package provisioner + +import ( + "github.com/kubernetes-incubator/external-storage/openstack/standalone-cinder/pkg/volumeservice" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +var _ = Describe("Mapper", func() { + Describe("creating a volume mapper from cinder connection information", func() { + var ( + conn volumeservice.VolumeConnection + mapper volumeMapper + err error + ) + + JustBeforeEach(func() { + mapper, err = newVolumeMapperFromConnection(conn) + }) + + Context("when the connection type is iscsi", func() { + BeforeEach(func() { + conn = volumeservice.VolumeConnection{ + DriverVolumeType: iscsiType, + } + }) + + It("should yield an iscsiMapper", func() { + Expect(mapper).Should(BeAssignableToTypeOf(&iscsiMapper{})) + }) + }) + + Context("when the connection type is rbd", func() { + BeforeEach(func() { + conn = volumeservice.VolumeConnection{ + DriverVolumeType: rbdType, + } + }) + + It("should yield an rbdMapper", func() { + Expect(mapper).Should(BeAssignableToTypeOf(&rbdMapper{})) + }) + }) + + Context("when the connection type is not supported", func() { + BeforeEach(func() { + conn = volumeservice.VolumeConnection{ + DriverVolumeType: "Unsupported", + } + }) + + It("should yield nil", func() { + Expect(mapper).Should(BeNil()) + }) + + It("should produce an error", func() { + Expect(err).Should(Not(BeNil())) + }) + }) + }) + + Describe("creating a volume mapper from a PV", func() { + var ( + pv v1.PersistentVolume + source v1.PersistentVolumeSource + mapper volumeMapper + err error + ) + + BeforeEach(func() { + pv = v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{}, + } + }) + + JustBeforeEach(func() { + pv.Spec.PersistentVolumeSource = source + mapper, err = newVolumeMapperFromPV(&pv) + }) + + Context("with an ISCSI PV", func() { + BeforeEach(func() { + source = v1.PersistentVolumeSource{ + ISCSI: &v1.ISCSIVolumeSource{}, + } + }) + + It("should yield an iscsiMapper", func() { + Expect(mapper).Should(BeAssignableToTypeOf(&iscsiMapper{})) + }) + }) + + Context("with an RBD PV", func() { + BeforeEach(func() { + source = v1.PersistentVolumeSource{ + RBD: &v1.RBDVolumeSource{}, + } + }) + + It("should yield an iscsiMapper", func() { + Expect(mapper).Should(BeAssignableToTypeOf(&rbdMapper{})) + }) + }) + + Context("with an unsupported persistent volume source", func() { + BeforeEach(func() { + source = v1.PersistentVolumeSource{} + }) + + It("should yield nil", func() { + Expect(mapper).Should(BeNil()) + }) + + It("should produce an error", func() { + Expect(err).Should(Not(BeNil())) + }) + }) + }) + + Describe("Building a persistent volume", func() { + var ( + mapper *fakeMapper + pv *v1.PersistentVolume + err error + ) + p := createCinderProvisioner() + options := createVolumeOptions() + conn := volumeservice.VolumeConnection{} + volumeID := "volumeid" + + BeforeEach(func() { + mapper = &fakeMapper{} + }) + + JustBeforeEach(func() { + pv, err = buildPV(mapper, p, options, conn, volumeID) + }) + + Context("when building the PV source fails", func() { + BeforeEach(func() { + mapper.failBuildPVSource = true + }) + It("should also fail", func() { + Expect(pv).To(BeNil()) + Expect(err).To(Not(BeNil())) + }) + }) + + It("should yield a valid PV associated with this provisioner", func() { + annotations := map[string]string{ + ProvisionerIDAnn: "identity", + CinderVolumeID: "volumeid", + } + accessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce} + quantity, parseErr := 
resource.ParseQuantity("1Gi") + Expect(parseErr).To(BeNil()) + capacity := v1.ResourceList{ + v1.ResourceStorage: quantity, + } + + Expect(pv).To(Not(BeNil())) + Expect(err).To(BeNil()) + Expect(pv.Name).To(Equal("testpv")) + Expect(pv.Namespace).To(Equal("testns")) + Expect(pv.Annotations).To(Equal(annotations)) + Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(v1.PersistentVolumeReclaimDelete)) + Expect(pv.Spec.AccessModes).To(Equal(accessModes)) + Expect(pv.Spec.Capacity).To(Equal(capacity)) + Expect(pv.Spec.PersistentVolumeSource).To(Not(BeNil())) + }) + }) +}) diff --git a/openstack/standalone-cinder/pkg/provisioner/provisioner_suite_test.go b/openstack/standalone-cinder/pkg/provisioner/provisioner_suite_test.go new file mode 100644 index 00000000000..2d301db8580 --- /dev/null +++ b/openstack/standalone-cinder/pkg/provisioner/provisioner_suite_test.go @@ -0,0 +1,29 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package provisioner_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestProvisioner(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Provisioner Suite") +} diff --git a/openstack/standalone-cinder/pkg/provisioner/provisioner_test.go b/openstack/standalone-cinder/pkg/provisioner/provisioner_test.go new file mode 100644 index 00000000000..b6fb6d2891b --- /dev/null +++ b/openstack/standalone-cinder/pkg/provisioner/provisioner_test.go @@ -0,0 +1,317 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package provisioner + +import ( + "errors" + + "github.com/gophercloud/gophercloud" + "github.com/kubernetes-incubator/external-storage/lib/controller" + "github.com/kubernetes-incubator/external-storage/openstack/standalone-cinder/pkg/volumeservice" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("Provisioner", func() { + Describe("A provision operation", func() { + var ( + pv *v1.PersistentVolume + p *cinderProvisioner + vsb *fakeVolumeServiceBroker + mb *fakeMapperBroker + options controller.VolumeOptions + err error + ) + + BeforeEach(func() { + vsb = &fakeVolumeServiceBroker{} + mb = newFakeMapperBroker() + p = createCinderProvisioner() + p.vsb = vsb + p.mb = mb + options = createVolumeOptions() + }) + + JustBeforeEach(func() { + pv, err = p.Provision(options) + }) + + It("should return a persistent volume", func() { + Expect(pv).To(Not(BeNil())) + Expect(err).To(BeNil()) + }) + + Context("when a claim selector is specified", func() { + BeforeEach(func() { + options.PVC.Spec.Selector = &metav1.LabelSelector{} + }) + + It("should fail", func() { + Expect(pv).To(BeNil()) + Expect(err).To(Not(BeNil())) + }) + }) + + Context("when creating a volume fails", func() { + BeforeEach(func() { + vsb.mightFail.set("CreateCinderVolume") + }) + It("should fail", func() { + Expect(pv).To(BeNil()) + Expect(err).To(Not(BeNil())) + }) + }) + + Context("when the volume does not become available", func() { + BeforeEach(func() { + vsb.mightFail.set("WaitForAvailableCinderVolume") + }) + It("should fail", func() { + Expect(pv).To(BeNil()) + Expect(err).To(Not(BeNil())) + }) + }) + + Context("when reserving the volume fails", func() { + BeforeEach(func() { + vsb.mightFail.set("ReserveCinderVolume") + }) + It("should fail", func() { + Expect(pv).To(BeNil()) + Expect(err).To(Not(BeNil())) + }) + }) + + Context("when connecting the volume fails", func() { + BeforeEach(func() { + vsb.mightFail.set("ConnectCinderVolume") + }) + It("should fail", func() { + Expect(pv).To(BeNil()) + Expect(err).To(Not(BeNil())) + }) + }) + + Context("when getting a volumeMapper fails", func() { + BeforeEach(func() { + mb.mightFail.set("NewVolumeMapperFromConnection") + }) + It("should fail", func() { + Expect(pv).To(BeNil()) + Expect(err).To(Not(BeNil())) + }) + }) + + Context("when preparing volume authentication fails", func() { + BeforeEach(func() { + mb.FakeVolumeMapper.mightFail.set("AuthSetup") + }) + It("should fail", func() { + Expect(pv).To(BeNil()) + Expect(err).To(Not(BeNil())) + }) + }) + + Context("when building the PV fails", func() { + BeforeEach(func() { + mb.mightFail.set("BuildPV") + }) + It("should fail", func() { + Expect(pv).To(BeNil()) + Expect(err).To(Not(BeNil())) + }) + }) + }) + + Describe("A delete operation", func() { + var ( + err error + vsb *fakeVolumeServiceBroker + mb *fakeMapperBroker + p *cinderProvisioner + pv *v1.PersistentVolume + ) + + BeforeEach(func() { + vsb = &fakeVolumeServiceBroker{} + mb = newFakeMapperBroker() + p = createCinderProvisioner() + p.vsb = vsb + p.mb = mb + pv = createPersistentVolume() + }) + + JustBeforeEach(func() { + err = p.Delete(pv) + }) + + It("should complete successfully", func() { + Expect(err).To(BeNil()) + }) + + Context("when the provisioner ID annotation is missing from the PV", func() { + BeforeEach(func() { + delete(pv.Annotations, ProvisionerIDAnn) + }) + + It("should fail", func() { + Expect(err).To(Not(BeNil())) + }) + }) + + Context("when the provisioner ID annotation does not match our provisioner", func() { + BeforeEach(func() { + pv.Annotations[ProvisionerIDAnn] = "a different provisioner" + }) + + It("should fail with an IgnoredError", func() { + Expect(err).To(Not(BeNil())) + 
Expect(err).To(BeAssignableToTypeOf(&controller.IgnoredError{})) + }) + }) + + Context("when the cinder volume ID annotation is missing from the PV", func() { + BeforeEach(func() { + delete(pv.Annotations, CinderVolumeID) + }) + + It("should fail", func() { + Expect(err).To(Not(BeNil())) + }) + }) + + Context("when getting a volumeMapper fails", func() { + BeforeEach(func() { + mb.mightFail.set("NewVolumeMapperFromPV") + }) + It("should fail", func() { + Expect(err).To(Not(BeNil())) + }) + }) + + Context("when teardown of volume authentication fails", func() { + BeforeEach(func() { + mb.FakeVolumeMapper.mightFail.set("AuthTeardown") + }) + It("should still succeed", func() { + Expect(err).To(BeNil()) + }) + }) + + Context("when disconnecting the volume fails", func() { + BeforeEach(func() { + vsb.mightFail.set("DisconnectCinderVolume") + }) + It("should fail", func() { + Expect(err).To(Not(BeNil())) + }) + }) + + Context("when unreserving the volume fails", func() { + BeforeEach(func() { + vsb.mightFail.set("UnreserveCinderVolume") + }) + It("should fail", func() { + Expect(err).To(Not(BeNil())) + }) + }) + + Context("when deleting the volume fails", func() { + BeforeEach(func() { + vsb.mightFail.set("DeleteCinderVolume") + }) + It("should fail", func() { + Expect(err).To(Not(BeNil())) + }) + }) + }) +}) + +type fakeVolumeServiceBroker struct { + mightFail failureInjector + volumeServiceBroker +} + +func (vsb *fakeVolumeServiceBroker) CreateCinderVolume(vs *gophercloud.ServiceClient, options controller.VolumeOptions) (string, error) { + if vsb.mightFail.isSet("CreateCinderVolume") { + return "", errors.New("injected error for testing") + } + return "cinderVolumeID", nil +} + +func (vsb *fakeVolumeServiceBroker) WaitForAvailableCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { + return vsb.mightFail.ret("WaitForAvailableCinderVolume") +} + +func (vsb *fakeVolumeServiceBroker) ReserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { + return vsb.mightFail.ret("ReserveCinderVolume") +} + +func (vsb *fakeVolumeServiceBroker) ConnectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) (volumeservice.VolumeConnection, error) { + if vsb.mightFail.isSet("ConnectCinderVolume") { + return volumeservice.VolumeConnection{}, errors.New("injected error for testing") + } + return volumeservice.VolumeConnection{}, nil +} + +func (vsb *fakeVolumeServiceBroker) DisconnectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { + return vsb.mightFail.ret("DisconnectCinderVolume") +} + +func (vsb *fakeVolumeServiceBroker) UnreserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { + return vsb.mightFail.ret("UnreserveCinderVolume") +} + +func (vsb *fakeVolumeServiceBroker) DeleteCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { + return vsb.mightFail.ret("DeleteCinderVolume") +} + +type fakeMapperBroker struct { + mightFail failureInjector + FakeVolumeMapper *fakeMapper + + mapperBroker +} + +func newFakeMapperBroker() *fakeMapperBroker { + ret := fakeMapperBroker{} + ret.FakeVolumeMapper = &fakeMapper{} + return &ret +} + +func (mb *fakeMapperBroker) newVolumeMapperFromConnection(conn volumeservice.VolumeConnection) (volumeMapper, error) { + if mb.mightFail.isSet("NewVolumeMapperFromConnection") { + return nil, errors.New("injected error for testing") + } + return mb.FakeVolumeMapper, nil +} + +func (mb *fakeMapperBroker) newVolumeMapperFromPV(pv *v1.PersistentVolume) (volumeMapper, error) { + if 
mb.mightFail.isSet("NewVolumeMapperFromPV") { + return nil, errors.New("injected error for testing") + } + return mb.FakeVolumeMapper, nil +} + +func (mb *fakeMapperBroker) buildPV(m volumeMapper, p *cinderProvisioner, options controller.VolumeOptions, conn volumeservice.VolumeConnection, volumeID string) (*v1.PersistentVolume, error) { + if mb.mightFail.isSet("BuildPV") { + return nil, errors.New("injected error for testing") + } + return &v1.PersistentVolume{}, nil +} diff --git a/openstack/standalone-cinder/pkg/provisioner/rbd_test.go b/openstack/standalone-cinder/pkg/provisioner/rbd_test.go new file mode 100644 index 00000000000..12301ed46ae --- /dev/null +++ b/openstack/standalone-cinder/pkg/provisioner/rbd_test.go @@ -0,0 +1,170 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package provisioner + +import ( + "github.com/kubernetes-incubator/external-storage/lib/controller" + "github.com/kubernetes-incubator/external-storage/openstack/standalone-cinder/pkg/volumeservice" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/api/core/v1" +) + +var _ = Describe("Rbd Mapper", func() { + Describe("Parsing ceph monitor information", func() { + var ( + conn volumeservice.VolumeConnection + monitors []string + ) + + BeforeEach(func() { + conn = createIscsiConnectionInfo() + }) + + JustBeforeEach(func() { + monitors = getMonitors(conn) + }) + + Context("when no monitors are given", func() { + It("should return an empty list", func() { + Expect(monitors).To(BeEmpty()) + }) + }) + + Context("when the number of hosts and ports are not equal", func() { + BeforeEach(func() { + conn.Data.Hosts = []string{"hostA"} + conn.Data.Ports = []string{"1", "2"} + }) + + It("should return nil", func() { + Expect(monitors).To(BeNil()) + }) + }) + + Context("when the lists are of equal length", func() { + BeforeEach(func() { + conn.Data.Hosts = []string{"hostA", "hostB"} + conn.Data.Ports = []string{"1", "2"} + }) + + It("should merge them into a single list", func() { + expected := []string{"hostA:1", "hostB:2"} + Expect(monitors).To(Equal(expected)) + }) + }) + }) + + Describe("the persistent volume source", func() { + var ( + mapper rbdMapper + conn volumeservice.VolumeConnection + options controller.VolumeOptions + source *v1.PersistentVolumeSource + err error + ) + + BeforeEach(func() { + conn = createRbdConnectionInfo() + options = createVolumeOptions() + mapper = rbdMapper{} + }) + + JustBeforeEach(func() { + source, err = mapper.BuildPVSource(conn, options) + }) + + Context("when the connection information is valid", func() { + BeforeEach(func() { + conn.Data.Hosts = []string{"hostA", "hostB"} + conn.Data.Ports = []string{"1", "2"} + conn.Data.Name = "pool/image" + }) + + It("should be populated with the RBD connection info", func() { + expectedMonitors := []string{"hostA:1", "hostB:2"} + Expect(source.RBD.CephMonitors).To(Equal(expectedMonitors)) + Expect(source.RBD.RBDPool).To(Equal("pool")) + Expect(source.RBD.RBDImage).To(Equal("image")) + 
Expect(source.RBD.RadosUser).To(Equal("admin")) + Expect(source.RBD.SecretRef.Name).To(Equal("storageclass-cephx-secret")) + }) + }) + + Context("when the connection field 'name' cannot be split into pool and image", func() { + BeforeEach(func() { + conn.Data.Name = "has-no-slash" + }) + + It("should be nil", func() { + Expect(source).To(BeNil()) + }) + }) + + Context("when no monitors can be parsed", func() { + BeforeEach(func() { + conn.Data.Hosts = []string{"hostA", "hostB"} + conn.Data.Ports = []string{"1"} + }) + + It("should be nil", func() { + Expect(source).To(BeNil()) + }) + }) + + Context("When the connection field 'name' contains an extra '/'", func() { + BeforeEach(func() { + conn.Data.Name = "pool/image/foo" + }) + + It("should have an image field containing a '/'", func() { + Expect(source.RBD.RBDImage).To(Equal("image/foo")) + }) + }) + }) + + Describe("Authorization", func() { + var ( + mapper rbdMapper + err error + ) + + Context("when called to setup", func() { + It("should do nothing and always succeed", func() { + err = mapper.AuthSetup(&cinderProvisioner{}, controller.VolumeOptions{}, + volumeservice.VolumeConnection{}) + Expect(err).To(BeNil()) + }) + }) + + Context("when called to tear down", func() { + It("should do nothing and always succeed", func() { + err = mapper.AuthTeardown(&cinderProvisioner{}, &v1.PersistentVolume{}) + Expect(err).To(BeNil()) + }) + }) + }) +}) + +func createRbdConnectionInfo() volumeservice.VolumeConnection { + return volumeservice.VolumeConnection{ + DriverVolumeType: rbdType, + Data: volumeservice.VolumeConnectionDetails{ + AuthUsername: "admin", + }, + } +} diff --git a/openstack/standalone-cinder/pkg/provisioner/testutils_test.go b/openstack/standalone-cinder/pkg/provisioner/testutils_test.go new file mode 100644 index 00000000000..7bb91ffc4ac --- /dev/null +++ b/openstack/standalone-cinder/pkg/provisioner/testutils_test.go @@ -0,0 +1,129 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package provisioner + +import ( + "errors" + "github.com/golang/glog" + "github.com/kubernetes-incubator/external-storage/lib/controller" + "github.com/kubernetes-incubator/external-storage/openstack/standalone-cinder/pkg/volumeservice" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func createVolumeOptions() controller.VolumeOptions { + var storageClass = "storageclass" + + capacity, err := resource.ParseQuantity("1Gi") + if err != nil { + glog.Error("Programmer error, cannot parse quantity string") + return controller.VolumeOptions{} + } + + return controller.VolumeOptions{ + PVName: "testpv", + PVC: &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "testns", + }, + Spec: v1.PersistentVolumeClaimSpec{ + StorageClassName: &storageClass, + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): capacity, + }, + }, + }, + }, + PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete, + } +} + +func createPersistentVolume() *v1.PersistentVolume { + return &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + ProvisionerIDAnn: "identity", + CinderVolumeID: "cinderVolumeID", + }, + }, + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{}, + ClaimRef: &v1.ObjectReference{ + Namespace: "testNs", + }, + }, + } +} + +func createCinderProvisioner() *cinderProvisioner { + return &cinderProvisioner{ + Identity: "identity", + } +} + +type failureInjector struct { + failOn map[string]bool +} + +func (vsb *failureInjector) set(method string) { + if vsb.failOn == nil { + vsb.failOn = make(map[string]bool) + } + vsb.failOn[method] = true +} + +func (vsb *failureInjector) isSet(method string) bool { + if vsb.failOn == nil { + return false + } + value, ok := vsb.failOn[method] + if !ok { + return false + } + return value +} + +func (vsb *failureInjector) ret(method string) error { + if vsb.isSet(method) { + return errors.New("injected error for testing") + } + return nil +} + +type fakeMapper struct { + mightFail failureInjector + volumeMapper + failBuildPVSource bool +} + +func (m *fakeMapper) BuildPVSource(conn volumeservice.VolumeConnection, options controller.VolumeOptions) (*v1.PersistentVolumeSource, error) { + if m.failBuildPVSource { + return nil, errors.New("Injected error for testing") + } + return &v1.PersistentVolumeSource{}, nil +} + +func (m *fakeMapper) AuthSetup(p *cinderProvisioner, options controller.VolumeOptions, conn volumeservice.VolumeConnection) error { + return m.mightFail.ret("AuthSetup") +} + +func (m *fakeMapper) AuthTeardown(p *cinderProvisioner, pv *v1.PersistentVolume) error { + return m.mightFail.ret("AuthTeardown") +} From f95b6d31c524b205326cd9e560f0629c91a5f99a Mon Sep 17 00:00:00 2001 From: Adam Litke Date: Tue, 26 Sep 2017 13:01:47 -0400 Subject: [PATCH 12/68] standalone-cinder: Add test Makefile targets Signed-off-by: Adam Litke --- Makefile | 8 +++++++- openstack/standalone-cinder/Makefile | 5 ++++- test.sh | 1 + 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 058631b7eee..1fed5a8c286 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,8 @@ all: aws/efs ceph/cephfs ceph/rbd flex gluster/block gluster/glusterfs iscsi/tar clean: clean-aws/efs clean-ceph/cephfs clean-ceph/rbd clean-flex clean-gluster/block clean-gluster/glusterfs 
clean-iscsi/targetd clean-local-volume/provisioner clean-local-volume/bootstrapper clean-nfs-client clean-nfs clean-snapshot clean-openstack/standalone-cinder .PHONY: clean -test: test-aws/efs test-local-volume/provisioner test-nfs test-snapshot + +test: test-aws/efs test-local-volume/provisioner test-nfs test-snapshot test-openstack/standalone-cinder .PHONY: test verify: @@ -176,6 +177,11 @@ openstack/standalone-cinder: make .PHONY: openstack/standalone-cinder +test-openstack/standalone-cinder: + cd openstack/standalone-cinder; \ + make test +.PHONY: test-openstack/standalone-cinder + clean-openstack/standalone-cinder: cd openstack/standalone-cinder; \ make clean diff --git a/openstack/standalone-cinder/Makefile b/openstack/standalone-cinder/Makefile index 6fa1fe84118..b9ce202d2cf 100644 --- a/openstack/standalone-cinder/Makefile +++ b/openstack/standalone-cinder/Makefile @@ -23,7 +23,7 @@ endif IMAGE = $(REGISTRY)standalone-cinder-provisioner:$(VERSION) MUTABLE_IMAGE = $(REGISTRY)standalone-cinder-provisioner:latest -.PHONY: all build container +.PHONY: all build container test all: build @@ -34,6 +34,9 @@ container: build docker build -t $(MUTABLE_IMAGE) . docker tag $(MUTABLE_IMAGE) $(IMAGE) +test: + go test ./pkg/provisioner + clean: -rm cinder-provisioner diff --git a/test.sh b/test.sh index c49573e1d08..d98875ccccc 100755 --- a/test.sh +++ b/test.sh @@ -94,6 +94,7 @@ elif [ "$TEST_SUITE" = "everything-else" ]; then make nfs-client make snapshot make test-snapshot + make test-openstack/standalone-cinder elif [ "$TEST_SUITE" = "local-volume" ]; then make local-volume/provisioner make test-local-volume/provisioner From 595e21c6e7dfdd20473aec6bde74937467dc1e75 Mon Sep 17 00:00:00 2001 From: Adam Litke Date: Wed, 27 Sep 2017 12:23:55 -0400 Subject: [PATCH 13/68] standalone-cinder: Rollback when Provision fails If provisioning fails we should attempt to clean up volumes that have been created. Add rollback logic which will disconnect, unreserve, and delete cinder volumes on error as appropriate. Signed-off-by: Adam Litke --- .../pkg/provisioner/provisioner.go | 65 +++++++++++++------ .../pkg/provisioner/provisioner_test.go | 44 ++++++++++--- 2 files changed, 79 insertions(+), 30 deletions(-) diff --git a/openstack/standalone-cinder/pkg/provisioner/provisioner.go b/openstack/standalone-cinder/pkg/provisioner/provisioner.go index 42fff731cd9..ae37810b6ab 100644 --- a/openstack/standalone-cinder/pkg/provisioner/provisioner.go +++ b/openstack/standalone-cinder/pkg/provisioner/provisioner.go @@ -73,6 +73,13 @@ func NewCinderProvisioner(client kubernetes.Interface, id, configFilePath string // Provision creates a storage asset and returns a PV object representing it. 
func (p *cinderProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error) { + var ( + connection volumeservice.VolumeConnection + mapper volumeMapper + pv *v1.PersistentVolume + cleanupErr error + ) + if options.PVC.Spec.Selector != nil { return nil, fmt.Errorf("claim Selector is not supported") } @@ -80,51 +87,66 @@ func (p *cinderProvisioner) Provision(options controller.VolumeOptions) (*v1.Per volumeID, err := p.vsb.CreateCinderVolume(p.VolumeService, options) if err != nil { glog.Errorf("Failed to create volume") - return nil, err + goto ERROR } err = p.vsb.WaitForAvailableCinderVolume(p.VolumeService, volumeID) if err != nil { - glog.Errorf("Volume did not become available") - return nil, err + glog.Errorf("Volume %s did not become available", volumeID) + goto ERROR_DELETE } err = p.vsb.ReserveCinderVolume(p.VolumeService, volumeID) if err != nil { - // TODO: Create placeholder PV? - glog.Errorf("Failed to reserve volume: %v", err) - return nil, err + glog.Errorf("Failed to reserve volume %s: %v", volumeID, err) + goto ERROR_DELETE } - connection, err := p.vsb.ConnectCinderVolume(p.VolumeService, volumeID) + connection, err = p.vsb.ConnectCinderVolume(p.VolumeService, volumeID) if err != nil { - // TODO: Create placeholder PV? - glog.Errorf("Failed to connect volume: %v", err) - return nil, err + glog.Errorf("Failed to connect volume %s: %v", volumeID, err) + goto ERROR_UNRESERVE } - mapper, err := p.mb.newVolumeMapperFromConnection(connection) + mapper, err = p.mb.newVolumeMapperFromConnection(connection) if err != nil { - // TODO: Create placeholder PV? glog.Errorf("Unable to create volume mapper: %f", err) - return nil, err + goto ERROR_DISCONNECT } err = mapper.AuthSetup(p, options, connection) if err != nil { - // TODO: Create placeholder PV? glog.Errorf("Failed to prepare volume auth: %v", err) - return nil, err + goto ERROR_DISCONNECT } - pv, err := p.mb.buildPV(mapper, p, options, connection, volumeID) + pv, err = p.mb.buildPV(mapper, p, options, connection, volumeID) if err != nil { - // TODO: Create placeholder PV? 
glog.Errorf("Failed to build PV: %v", err) - return nil, err + goto ERROR_DISCONNECT } - return pv, nil + +ERROR_DISCONNECT: + cleanupErr = p.vsb.DisconnectCinderVolume(p.VolumeService, volumeID) + if cleanupErr != nil { + glog.Errorf("Failed to disconnect volume %s: %v", volumeID, cleanupErr) + } + glog.V(3).Infof("Volume %s disconnected", volumeID) +ERROR_UNRESERVE: + cleanupErr = p.vsb.UnreserveCinderVolume(p.VolumeService, volumeID) + if cleanupErr != nil { + glog.Errorf("Failed to unreserve volume %s: %v", volumeID, cleanupErr) + } + glog.V(3).Infof("Volume %s unreserved", volumeID) +ERROR_DELETE: + cleanupErr = p.vsb.DeleteCinderVolume(p.VolumeService, volumeID) + if cleanupErr != nil { + glog.Errorf("Failed to delete volume %s: %v", volumeID, cleanupErr) + } + glog.V(3).Infof("Volume %s deleted", volumeID) +ERROR: + return nil, err // Return the original error } // Delete removes the storage asset that was created by Provision represented @@ -149,6 +171,7 @@ func (p *cinderProvisioner) Delete(pv *v1.PersistentVolume) error { mapper, err := p.mb.newVolumeMapperFromPV(pv) if err != nil { + glog.Errorf("Failed to instantiate mapper: %s", err) return err } @@ -156,18 +179,20 @@ func (p *cinderProvisioner) Delete(pv *v1.PersistentVolume) error { err = p.vsb.DisconnectCinderVolume(p.VolumeService, volumeID) if err != nil { + glog.Errorf("Failed to disconnect volume %s: %v", volumeID, err) return err } err = p.vsb.UnreserveCinderVolume(p.VolumeService, volumeID) if err != nil { // TODO: Create placeholder PV? - glog.Errorf("Failed to unreserve volume: %v", err) + glog.Errorf("Failed to unreserve volume %s: %v", volumeID, err) return err } err = p.vsb.DeleteCinderVolume(p.VolumeService, volumeID) if err != nil { + glog.Errorf("Failed to delete volume %s: %v", volumeID, err) return err } diff --git a/openstack/standalone-cinder/pkg/provisioner/provisioner_test.go b/openstack/standalone-cinder/pkg/provisioner/provisioner_test.go index b6fb6d2891b..e92566cf46e 100644 --- a/openstack/standalone-cinder/pkg/provisioner/provisioner_test.go +++ b/openstack/standalone-cinder/pkg/provisioner/provisioner_test.go @@ -17,6 +17,7 @@ limitations under the License. package provisioner import ( + "bytes" "errors" "github.com/gophercloud/gophercloud" @@ -36,6 +37,7 @@ var _ = Describe("Provisioner", func() { vsb *fakeVolumeServiceBroker mb *fakeMapperBroker options controller.VolumeOptions + cleanup string err error ) @@ -81,60 +83,72 @@ var _ = Describe("Provisioner", func() { Context("when the volume does not become available", func() { BeforeEach(func() { vsb.mightFail.set("WaitForAvailableCinderVolume") + cleanup = "DeleteCinderVolume." }) - It("should fail", func() { + It("should fail and delete the volume", func() { Expect(pv).To(BeNil()) Expect(err).To(Not(BeNil())) + Expect(vsb.cleanupLog.String()).To(Equal(cleanup)) }) }) Context("when reserving the volume fails", func() { BeforeEach(func() { vsb.mightFail.set("ReserveCinderVolume") + cleanup = "DeleteCinderVolume." }) - It("should fail", func() { + It("should fail and delete the volume", func() { Expect(pv).To(BeNil()) Expect(err).To(Not(BeNil())) + Expect(vsb.cleanupLog.String()).To(Equal(cleanup)) }) }) Context("when connecting the volume fails", func() { BeforeEach(func() { vsb.mightFail.set("ConnectCinderVolume") + cleanup = "UnreserveCinderVolume.DeleteCinderVolume." 
}) - It("should fail", func() { + It("should fail and the volume should be unreserved and deleted", func() { Expect(pv).To(BeNil()) Expect(err).To(Not(BeNil())) + Expect(vsb.cleanupLog.String()).To(Equal(cleanup)) }) }) Context("when getting a volumeMapper fails", func() { BeforeEach(func() { mb.mightFail.set("NewVolumeMapperFromConnection") + cleanup = "DisconnectCinderVolume.UnreserveCinderVolume.DeleteCinderVolume." }) - It("should fail", func() { + It("should fail and the volume should be disconnected, unreserved and deleted", func() { Expect(pv).To(BeNil()) Expect(err).To(Not(BeNil())) + Expect(vsb.cleanupLog.String()).To(Equal(cleanup)) }) }) Context("when preparing volume authentication fails", func() { BeforeEach(func() { mb.FakeVolumeMapper.mightFail.set("AuthSetup") + cleanup = "DisconnectCinderVolume.UnreserveCinderVolume.DeleteCinderVolume." }) - It("should fail", func() { + It("should fail and the volume should be disconnected, unreserved and deleted", func() { Expect(pv).To(BeNil()) Expect(err).To(Not(BeNil())) + Expect(vsb.cleanupLog.String()).To(Equal(cleanup)) }) }) Context("when building the PV fails", func() { BeforeEach(func() { mb.mightFail.set("BuildPV") + cleanup = "DisconnectCinderVolume.UnreserveCinderVolume.DeleteCinderVolume." }) - It("should fail", func() { + It("should fail and the volume should be disconnected, unreserved and deleted", func() { Expect(pv).To(BeNil()) Expect(err).To(Not(BeNil())) + Expect(vsb.cleanupLog.String()).To(Equal(cleanup)) }) }) }) @@ -244,7 +258,8 @@ var _ = Describe("Provisioner", func() { }) type fakeVolumeServiceBroker struct { - mightFail failureInjector + cleanupLog bytes.Buffer + mightFail failureInjector volumeServiceBroker } @@ -270,16 +285,25 @@ func (vsb *fakeVolumeServiceBroker) ConnectCinderVolume(vs *gophercloud.ServiceC return volumeservice.VolumeConnection{}, nil } +func (vsb *fakeVolumeServiceBroker) _cleanupRet(fn string) error { + if vsb.mightFail.isSet(fn) { + return errors.New("injected error for testing") + } + vsb.cleanupLog.WriteString(fn) + vsb.cleanupLog.WriteString(".") + return nil +} + func (vsb *fakeVolumeServiceBroker) DisconnectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { - return vsb.mightFail.ret("DisconnectCinderVolume") + return vsb._cleanupRet("DisconnectCinderVolume") } func (vsb *fakeVolumeServiceBroker) UnreserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { - return vsb.mightFail.ret("UnreserveCinderVolume") + return vsb._cleanupRet("UnreserveCinderVolume") } func (vsb *fakeVolumeServiceBroker) DeleteCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { - return vsb.mightFail.ret("DeleteCinderVolume") + return vsb._cleanupRet("DeleteCinderVolume") } type fakeMapperBroker struct { From cc20b1c3c312434d4be5621a61474f32baf06a7c Mon Sep 17 00:00:00 2001 From: Adam Litke Date: Wed, 27 Sep 2017 12:43:37 -0400 Subject: [PATCH 14/68] vendor: Add ginkgo and gomega Add the vendored packages ginkgo and gomega which are useful for writing unit tests and are needed by the nfs provisioner and now the standalone-cinder one as well. 
Signed-off-by: Adam Litke --- glide.lock | 46 +- glide.yaml | 4 +- vendor/github.com/aws/aws-sdk-go/CHANGELOG.md | 30 + .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../protocol/restjson/build_bench_test.go | 428 +- vendor/github.com/onsi/ginkgo/.gitignore | 4 + vendor/github.com/onsi/ginkgo/.travis.yml | 14 + vendor/github.com/onsi/ginkgo/CHANGELOG.md | 137 + vendor/github.com/onsi/ginkgo/LICENSE | 20 + vendor/github.com/onsi/ginkgo/README.md | 115 + .../github.com/onsi/ginkgo/config/config.go | 187 + vendor/github.com/onsi/ginkgo/ginkgo_dsl.go | 569 + .../internal/codelocation/code_location.go | 32 + .../codelocation/code_location_suite_test.go | 13 + .../codelocation/code_location_test.go | 79 + .../internal/containernode/container_node.go | 151 + .../container_node_suite_test.go | 13 + .../containernode/container_node_test.go | 212 + .../onsi/ginkgo/internal/failer/failer.go | 92 + .../internal/failer/failer_suite_test.go | 13 + .../ginkgo/internal/failer/failer_test.go | 141 + .../ginkgo/internal/leafnodes/benchmarker.go | 103 + .../ginkgo/internal/leafnodes/interfaces.go | 19 + .../onsi/ginkgo/internal/leafnodes/it_node.go | 46 + .../ginkgo/internal/leafnodes/it_node_test.go | 22 + .../leafnodes/leaf_node_suite_test.go | 13 + .../ginkgo/internal/leafnodes/measure_node.go | 61 + .../internal/leafnodes/measure_node_test.go | 154 + .../onsi/ginkgo/internal/leafnodes/runner.go | 113 + .../ginkgo/internal/leafnodes/setup_nodes.go | 41 + .../internal/leafnodes/setup_nodes_test.go | 40 + .../internal/leafnodes/shared_runner_test.go | 359 + .../ginkgo/internal/leafnodes/suite_nodes.go | 54 + .../internal/leafnodes/suite_nodes_test.go | 230 + .../synchronized_after_suite_node.go | 89 + .../synchronized_after_suite_node_test.go | 196 + .../synchronized_before_suite_node.go | 182 + .../synchronized_before_suite_node_test.go | 445 + .../onsi/ginkgo/internal/remote/aggregator.go | 249 + .../ginkgo/internal/remote/aggregator_test.go | 315 + .../remote/fake_output_interceptor_test.go | 17 + .../internal/remote/fake_poster_test.go | 33 + .../internal/remote/forwarding_reporter.go | 90 + .../remote/forwarding_reporter_test.go | 180 + .../internal/remote/output_interceptor.go | 10 + .../remote/output_interceptor_unix.go | 55 + .../internal/remote/output_interceptor_win.go | 33 + .../internal/remote/remote_suite_test.go | 13 + .../onsi/ginkgo/internal/remote/server.go | 224 + .../ginkgo/internal/remote/server_test.go | 269 + .../remote/syscall_dup_linux_arm64.go | 11 + .../internal/remote/syscall_dup_solaris.go | 9 + .../internal/remote/syscall_dup_unix.go | 11 + .../onsi/ginkgo/internal/spec/spec.go | 206 + .../ginkgo/internal/spec/spec_suite_test.go | 13 + .../onsi/ginkgo/internal/spec/spec_test.go | 657 + .../onsi/ginkgo/internal/spec/specs.go | 123 + .../onsi/ginkgo/internal/spec/specs_test.go | 287 + .../internal/spec_iterator/index_computer.go | 55 + .../spec_iterator/index_computer_test.go | 149 + .../spec_iterator/parallel_spec_iterator.go | 60 + .../parallel_spec_iterator_test.go | 112 + .../spec_iterator/serial_spec_iterator.go | 45 + .../serial_spec_iterator_test.go | 64 + .../sharded_parallel_spec_iterator.go | 47 + .../sharded_parallel_spec_iterator_test.go | 62 + .../internal/spec_iterator/spec_iterator.go | 20 + .../spec_iterator/spec_iterator_suite_test.go | 13 + .../ginkgo/internal/specrunner/random_id.go | 15 + .../ginkgo/internal/specrunner/spec_runner.go | 408 + .../specrunner/spec_runner_suite_test.go | 13 + .../internal/specrunner/spec_runner_test.go | 779 + 
.../onsi/ginkgo/internal/suite/suite.go | 183 + .../ginkgo/internal/suite/suite_suite_test.go | 35 + .../onsi/ginkgo/internal/suite/suite_test.go | 371 + .../internal/testingtproxy/testing_t_proxy.go | 76 + .../ginkgo/internal/writer/fake_writer.go | 36 + .../onsi/ginkgo/internal/writer/writer.go | 81 + .../internal/writer/writer_suite_test.go | 13 + .../ginkgo/internal/writer/writer_test.go | 75 + .../onsi/ginkgo/reporters/default_reporter.go | 84 + .../ginkgo/reporters/default_reporter_test.go | 414 + .../onsi/ginkgo/reporters/fake_reporter.go | 59 + .../onsi/ginkgo/reporters/junit_reporter.go | 147 + .../ginkgo/reporters/junit_reporter_test.go | 247 + .../onsi/ginkgo/reporters/reporter.go | 15 + .../ginkgo/reporters/reporters_suite_test.go | 13 + .../reporters/stenographer/console_logging.go | 64 + .../stenographer/fake_stenographer.go | 142 + .../reporters/stenographer/stenographer.go | 573 + .../stenographer/support/go-colorable/LICENSE | 21 + .../support/go-colorable/README.md | 43 + .../support/go-colorable/colorable_others.go | 24 + .../support/go-colorable/colorable_windows.go | 783 + .../support/go-colorable/noncolorable.go | 57 + .../stenographer/support/go-isatty/LICENSE | 9 + .../stenographer/support/go-isatty/README.md | 37 + .../stenographer/support/go-isatty/doc.go | 2 + .../support/go-isatty/isatty_appengine.go | 9 + .../support/go-isatty/isatty_bsd.go | 18 + .../support/go-isatty/isatty_linux.go | 18 + .../support/go-isatty/isatty_solaris.go | 16 + .../support/go-isatty/isatty_windows.go | 19 + .../ginkgo/reporters/teamcity_reporter.go | 92 + .../reporters/teamcity_reporter_test.go | 213 + .../onsi/ginkgo/types/code_location.go | 15 + .../onsi/ginkgo/types/synchronization.go | 30 + vendor/github.com/onsi/ginkgo/types/types.go | 173 + .../onsi/ginkgo/types/types_suite_test.go | 13 + .../onsi/ginkgo/types/types_test.go | 99 + vendor/github.com/onsi/gomega/.gitignore | 5 + vendor/github.com/onsi/gomega/.travis.yml | 12 + vendor/github.com/onsi/gomega/CHANGELOG.md | 70 + vendor/github.com/onsi/gomega/LICENSE | 20 + vendor/github.com/onsi/gomega/README.md | 21 + .../github.com/onsi/gomega/format/format.go | 277 + .../onsi/gomega/format/format_suite_test.go | 13 + .../onsi/gomega/format/format_test.go | 455 + vendor/github.com/onsi/gomega/gomega_dsl.go | 335 + .../gomega/internal/assertion/assertion.go | 98 + .../assertion/assertion_suite_test.go | 13 + .../internal/assertion/assertion_test.go | 252 + .../asyncassertion/async_assertion.go | 189 + .../async_assertion_suite_test.go | 13 + .../asyncassertion/async_assertion_test.go | 345 + .../internal/oraclematcher/oracle_matcher.go | 25 + .../testingtsupport/testing_t_support.go | 40 + .../testingtsupport/testing_t_support_test.go | 12 + vendor/github.com/onsi/gomega/matchers.go | 418 + vendor/github.com/onsi/gomega/matchers/and.go | 64 + .../onsi/gomega/matchers/and_test.go | 103 + .../matchers/assignable_to_type_of_matcher.go | 31 + .../assignable_to_type_of_matcher_test.go | 30 + .../onsi/gomega/matchers/be_a_directory.go | 54 + .../gomega/matchers/be_a_directory_test.go | 40 + .../onsi/gomega/matchers/be_a_regular_file.go | 54 + .../gomega/matchers/be_a_regular_file_test.go | 40 + .../gomega/matchers/be_an_existing_file.go | 38 + .../matchers/be_an_existing_file_test.go | 40 + .../onsi/gomega/matchers/be_closed_matcher.go | 45 + .../gomega/matchers/be_closed_matcher_test.go | 70 + .../onsi/gomega/matchers/be_empty_matcher.go | 26 + .../gomega/matchers/be_empty_matcher_test.go | 52 + .../matchers/be_equivalent_to_matcher.go | 
33 + .../matchers/be_equivalent_to_matcher_test.go | 50 + .../onsi/gomega/matchers/be_false_matcher.go | 25 + .../gomega/matchers/be_false_matcher_test.go | 20 + .../onsi/gomega/matchers/be_identical_to.go | 37 + .../gomega/matchers/be_identical_to_test.go | 61 + .../onsi/gomega/matchers/be_nil_matcher.go | 18 + .../gomega/matchers/be_nil_matcher_test.go | 28 + .../gomega/matchers/be_numerically_matcher.go | 119 + .../matchers/be_numerically_matcher_test.go | 148 + .../onsi/gomega/matchers/be_sent_matcher.go | 71 + .../gomega/matchers/be_sent_matcher_test.go | 106 + .../gomega/matchers/be_temporally_matcher.go | 65 + .../matchers/be_temporally_matcher_test.go | 98 + .../onsi/gomega/matchers/be_true_matcher.go | 25 + .../gomega/matchers/be_true_matcher_test.go | 20 + .../onsi/gomega/matchers/be_zero_matcher.go | 27 + .../gomega/matchers/be_zero_matcher_test.go | 30 + .../onsi/gomega/matchers/consist_of.go | 80 + .../onsi/gomega/matchers/consist_of_test.go | 75 + .../matchers/contain_element_matcher.go | 56 + .../matchers/contain_element_matcher_test.go | 76 + .../matchers/contain_substring_matcher.go | 37 + .../contain_substring_matcher_test.go | 36 + .../onsi/gomega/matchers/equal_matcher.go | 27 + .../gomega/matchers/equal_matcher_test.go | 44 + .../onsi/gomega/matchers/have_cap_matcher.go | 28 + .../gomega/matchers/have_cap_matcher_test.go | 50 + .../onsi/gomega/matchers/have_key_matcher.go | 53 + .../gomega/matchers/have_key_matcher_test.go | 73 + .../matchers/have_key_with_value_matcher.go | 73 + .../have_key_with_value_matcher_test.go | 82 + .../onsi/gomega/matchers/have_len_matcher.go | 27 + .../gomega/matchers/have_len_matcher_test.go | 53 + .../gomega/matchers/have_occurred_matcher.go | 33 + .../matchers/have_occurred_matcher_test.go | 58 + .../gomega/matchers/have_prefix_matcher.go | 35 + .../matchers/have_prefix_matcher_test.go | 36 + .../gomega/matchers/have_suffix_matcher.go | 35 + .../matchers/have_suffix_matcher_test.go | 36 + .../gomega/matchers/match_error_matcher.go | 50 + .../matchers/match_error_matcher_test.go | 93 + .../gomega/matchers/match_json_matcher.go | 64 + .../matchers/match_json_matcher_test.go | 73 + .../gomega/matchers/match_regexp_matcher.go | 42 + .../matchers/match_regexp_matcher_test.go | 44 + .../gomega/matchers/match_yaml_matcher.go | 74 + .../matchers/match_yaml_matcher_test.go | 94 + .../matchers/matcher_tests_suite_test.go | 30 + vendor/github.com/onsi/gomega/matchers/not.go | 30 + .../onsi/gomega/matchers/not_test.go | 57 + vendor/github.com/onsi/gomega/matchers/or.go | 67 + .../onsi/gomega/matchers/or_test.go | 85 + .../onsi/gomega/matchers/panic_matcher.go | 42 + .../gomega/matchers/panic_matcher_test.go | 36 + .../onsi/gomega/matchers/receive_matcher.go | 126 + .../gomega/matchers/receive_matcher_test.go | 280 + .../onsi/gomega/matchers/succeed_matcher.go | 33 + .../gomega/matchers/succeed_matcher_test.go | 62 + .../goraph/bipartitegraph/bipartitegraph.go | 41 + .../bipartitegraph/bipartitegraphmatching.go | 161 + .../matchers/support/goraph/edge/edge.go | 61 + .../matchers/support/goraph/node/node.go | 7 + .../matchers/support/goraph/util/util.go | 7 + .../onsi/gomega/matchers/type_support.go | 176 + .../onsi/gomega/matchers/with_transform.go | 72 + .../gomega/matchers/with_transform_test.go | 102 + vendor/github.com/onsi/gomega/types/types.go | 17 + .../v2beta2/cloudmonitoring-api.json | 839 - .../v2beta2/cloudmonitoring-gen.go | 2247 - .../api/compute/v0.beta/compute-api.json | 23661 ----- .../api/compute/v0.beta/compute-gen.go | 73513 
---------------- .../google.golang.org/api/dns/v1/dns-api.json | 708 - .../google.golang.org/api/dns/v1/dns-gen.go | 2028 - .../api/logging/v2beta1/logging-api.json | 1526 - .../api/logging/v2beta1/logging-gen.go | 4414 - .../api/monitoring/v3/monitoring-api.json | 1684 - .../api/monitoring/v3/monitoring-gen.go | 4573 - .../api/pubsub/v1/pubsub-api.json | 1159 - .../api/pubsub/v1/pubsub-gen.go | 4458 - 223 files changed, 20616 insertions(+), 121039 deletions(-) create mode 100644 vendor/github.com/onsi/ginkgo/.gitignore create mode 100644 vendor/github.com/onsi/ginkgo/.travis.yml create mode 100644 vendor/github.com/onsi/ginkgo/CHANGELOG.md create mode 100644 vendor/github.com/onsi/ginkgo/LICENSE create mode 100644 vendor/github.com/onsi/ginkgo/README.md create mode 100644 vendor/github.com/onsi/ginkgo/config/config.go create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo_dsl.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_suite_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/containernode/container_node_suite_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/containernode/container_node_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/failer/failer.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/failer/failer_suite_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/failer/failer_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/leaf_node_suite_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/aggregator_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/fake_output_interceptor_test.go create mode 100644 
vendor/github.com/onsi/ginkgo/internal/remote/fake_poster_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/remote_suite_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/server.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/server_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec/spec.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec/spec_suite_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec/spec_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec/specs.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec/specs_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator_suite_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_suite_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/suite/suite.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/suite/suite_suite_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/suite/suite_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/writer/writer.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/writer/writer_suite_test.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/writer/writer_test.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/default_reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/default_reporter_test.go 
create mode 100644 vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/junit_reporter_test.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/reporters_suite_test.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter_test.go create mode 100644 vendor/github.com/onsi/ginkgo/types/code_location.go create mode 100644 vendor/github.com/onsi/ginkgo/types/synchronization.go create mode 100644 vendor/github.com/onsi/ginkgo/types/types.go create mode 100644 vendor/github.com/onsi/ginkgo/types/types_suite_test.go create mode 100644 vendor/github.com/onsi/ginkgo/types/types_test.go create mode 100644 vendor/github.com/onsi/gomega/.gitignore create mode 100644 vendor/github.com/onsi/gomega/.travis.yml create mode 100644 vendor/github.com/onsi/gomega/CHANGELOG.md create mode 100644 vendor/github.com/onsi/gomega/LICENSE create mode 100644 vendor/github.com/onsi/gomega/README.md create mode 100644 vendor/github.com/onsi/gomega/format/format.go create mode 100644 vendor/github.com/onsi/gomega/format/format_suite_test.go create mode 100644 vendor/github.com/onsi/gomega/format/format_test.go create mode 100644 vendor/github.com/onsi/gomega/gomega_dsl.go create mode 100644 vendor/github.com/onsi/gomega/internal/assertion/assertion.go create mode 100644 vendor/github.com/onsi/gomega/internal/assertion/assertion_suite_test.go create mode 100644 vendor/github.com/onsi/gomega/internal/assertion/assertion_test.go create mode 100644 vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go create mode 100644 
vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_suite_test.go create mode 100644 vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_test.go create mode 100644 vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go create mode 100644 vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go create mode 100644 vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers.go create mode 100644 vendor/github.com/onsi/gomega/matchers/and.go create mode 100644 vendor/github.com/onsi/gomega/matchers/and_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_a_directory.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_a_directory_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_a_regular_file_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_an_existing_file_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_closed_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_empty_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_false_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_false_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_identical_to.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_identical_to_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_nil_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_numerically_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_sent_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_true_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_true_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_zero_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/consist_of.go create mode 100644 vendor/github.com/onsi/gomega/matchers/consist_of_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/contain_element_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go create 
mode 100644 vendor/github.com/onsi/gomega/matchers/contain_substring_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/equal_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/equal_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_cap_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_key_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_key_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_len_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_len_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_prefix_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_suffix_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/match_error_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/match_error_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/match_json_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/match_json_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/match_regexp_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/match_yaml_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/matcher_tests_suite_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/not.go create mode 100644 vendor/github.com/onsi/gomega/matchers/not_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/or.go create mode 100644 vendor/github.com/onsi/gomega/matchers/or_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/panic_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/panic_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/receive_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/receive_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/succeed_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/succeed_matcher_test.go create mode 100644 vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go create mode 100644 vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go create mode 100644 vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go create mode 100644 vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go create mode 100644 vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go create mode 100644 vendor/github.com/onsi/gomega/matchers/type_support.go create mode 100644 vendor/github.com/onsi/gomega/matchers/with_transform.go create mode 100644 
vendor/github.com/onsi/gomega/matchers/with_transform_test.go create mode 100644 vendor/github.com/onsi/gomega/types/types.go delete mode 100644 vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-api.json delete mode 100644 vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-gen.go delete mode 100644 vendor/google.golang.org/api/compute/v0.beta/compute-api.json delete mode 100644 vendor/google.golang.org/api/compute/v0.beta/compute-gen.go delete mode 100644 vendor/google.golang.org/api/dns/v1/dns-api.json delete mode 100644 vendor/google.golang.org/api/dns/v1/dns-gen.go delete mode 100644 vendor/google.golang.org/api/logging/v2beta1/logging-api.json delete mode 100644 vendor/google.golang.org/api/logging/v2beta1/logging-gen.go delete mode 100644 vendor/google.golang.org/api/monitoring/v3/monitoring-api.json delete mode 100644 vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go delete mode 100644 vendor/google.golang.org/api/pubsub/v1/pubsub-api.json delete mode 100644 vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go diff --git a/glide.lock b/glide.lock index 802efcea3b4..837c012f5d7 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 1626357572d7723d44f5c3d8ba27e94a6c95a306ff8906f9d79a2e2711d5af9f -updated: 2017-09-26T20:49:30.595112131+02:00 +hash: 1334daf8f668df5298c2ebb2132e3e45def3f7fdf3f3e4558912a1133424fed4 +updated: 2017-09-27T12:40:51.281879295-04:00 imports: - name: cloud.google.com/go version: 3b1ae45394a234c385be014e9a488f2bb6eef821 @@ -7,7 +7,7 @@ imports: - compute/metadata - internal - name: github.com/aws/aws-sdk-go - version: b69f447375c7fa0047ebcdd8ae5d585d5aac2f71 + version: 93c0610f5cfe455cdccfdbe17bb0276764e62f1f subpackages: - aws - aws/awserr @@ -214,6 +214,40 @@ imports: - pbutil - name: github.com/mitchellh/mapstructure version: db1efb556f84b25a0a13a04aad883943538ad2e0 +- name: github.com/onsi/ginkgo + version: 67b9df7f55fe1165fd9ad49aca7754cce01a42b8 + subpackages: + - config + - internal/codelocation + - internal/containernode + - internal/failer + - internal/leafnodes + - internal/remote + - internal/spec + - internal/spec_iterator + - internal/specrunner + - internal/suite + - internal/testingtproxy + - internal/writer + - reporters + - reporters/stenographer + - reporters/stenographer/support/go-colorable + - reporters/stenographer/support/go-isatty + - types +- name: github.com/onsi/gomega + version: d59fa0ac68bb5dd932ee8d24eed631cdd519efc3 + subpackages: + - format + - internal/assertion + - internal/asyncassertion + - internal/oraclematcher + - internal/testingtsupport + - matchers + - matchers/support/goraph/bipartitegraph + - matchers/support/goraph/edge + - matchers/support/goraph/node + - matchers/support/goraph/util + - types - name: github.com/pborman/uuid version: ca53cad383cad2479bbba7f7a1a05797ec1386e4 - name: github.com/pelletier/go-buffruneio @@ -315,17 +349,11 @@ imports: - name: google.golang.org/api version: e3824ed33c72bf7e81da0286772c34b987520914 subpackages: - - cloudmonitoring/v2beta2 - - compute/v0.beta - compute/v1 - container/v1 - - dns/v1 - gensupport - googleapi - googleapi/internal/uritemplates - - logging/v2beta1 - - monitoring/v3 - - pubsub/v1 - name: google.golang.org/appengine version: 4f7eeb5305a4ba1966344836ba4af9996b7b4e05 subpackages: diff --git a/glide.yaml b/glide.yaml index 49d34fb738b..8432d7fc6bf 100644 --- a/glide.yaml +++ b/glide.yaml @@ -45,9 +45,9 @@ import: version: b4c2377fa77951a0e08163f52dc9b3e206355194 - package: github.com/davecgh/go-spew version: 
04cdfd42973bb9c8589fd6a731800cf222fde1a9 - supbackages: - - spew - package: golang.org/x/sys version: a2e06a18 subpackages: - unix +- package: github.com/onsi/ginkgo +- package: github.com/onsi/gomega diff --git a/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md index 9e194f1ff71..a404db8cbea 100644 --- a/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md @@ -1,3 +1,33 @@ +Release v1.12.0 (2017-09-26) +=== + +### SDK Bugs +* `API Marshaler`: Revert REST JSON and XML protocol marshaler improvements + * Bug [#1550](https://github.com/aws/aws-sdk-go/issues/1550) identified a missed condition in the Amazon Route 53 RESTXML protocol marshaling causing requests to that service to fail. Reverting the marshaler improvements until the bug can be fixed. + +Release v1.11.0 (2017-09-26) +=== + +### Service Client Updates +* `service/cloudformation`: Updates service API and documentation + * You can now prevent a stack from being accidentally deleted by enabling termination protection on the stack. If you attempt to delete a stack with termination protection enabled, the deletion fails and the stack, including its status, remains unchanged. You can enable termination protection on a stack when you create it. Termination protection on stacks is disabled by default. After creation, you can set termination protection on a stack whose status is CREATE_COMPLETE, UPDATE_COMPLETE, or UPDATE_ROLLBACK_COMPLETE. + +### SDK Features +* Add dep Go dependency management metadata files (#1544) + * Adds the Go `dep` dependency management metadata files to the SDK. + * Fixes [#1451](https://github.com/aws/aws-sdk-go/issues/1451) + * Fixes [#634](https://github.com/aws/aws-sdk-go/issues/634) +* `service/dynamodb/expression`: Add expression building utility for DynamoDB ([#1527](https://github.com/aws/aws-sdk-go/pull/1527)) + * Adds a new package, expression, to the SDK providing builder utilities to create DynamoDB expressions safely taking advantage of type safety. +* `API Marshaler`: Add generated marshalers for RESTXML protocol ([#1409](https://github.com/aws/aws-sdk-go/pull/1409)) + * Updates the RESTXML protocol marshaler to use generated code instead of reflection for REST XML based services. +* `API Marshaler`: Add generated marshalers for RESTJSON protocol ([#1547](https://github.com/aws/aws-sdk-go/pull/1547)) + * Updates the RESTJSON protocol marshaler to use generated code instead of reflection for REST JSON based services. + +### SDK Enhancements +* `private/protocol`: Update format of REST JSON and XMl benchmarks ([#1546](https://github.com/aws/aws-sdk-go/pull/1546)) + * Updates the format of the REST JSON and XML benchmarks to be readable. RESTJSON benchmarks were updated to more accurately bench building of the protocol. 
+ Release v1.10.51 (2017-09-22) === diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 66802ad327b..5d6b17f327e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.10.51" +const SDKVersion = "1.12.0" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_bench_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_bench_test.go index 31e1d6c04c7..7a8a8c2c382 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_bench_test.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_bench_test.go @@ -3,232 +3,131 @@ package restjson_test import ( - "bytes" - "encoding/json" + "net/http" + "net/http/httptest" + "os" "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/awstesting" - "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/private/protocol/restjson" "github.com/aws/aws-sdk-go/service/elastictranscoder" ) -func BenchmarkRESTJSONBuild_Complex_elastictranscoderCreateJobInput(b *testing.B) { - svc := awstesting.NewClient() - svc.ServiceName = "elastictranscoder" - svc.APIVersion = "2012-09-25" +var ( + elastictranscoderSvc *elastictranscoder.ElasticTranscoder +) - for i := 0; i < b.N; i++ { - r := svc.NewRequest(&request.Operation{Name: "CreateJobInput"}, restjsonBuildParms, nil) - restjson.Build(r) - if r.Error != nil { - b.Fatal("Unexpected error", r.Error) - } - } +func TestMain(m *testing.M) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + sess := session.Must(session.NewSession(&aws.Config{ + Credentials: credentials.NewStaticCredentials("Key", "Secret", "Token"), + Endpoint: aws.String(server.URL), + S3ForcePathStyle: aws.Bool(true), + DisableSSL: aws.Bool(true), + Region: aws.String(endpoints.UsWest2RegionID), + })) + elastictranscoderSvc = elastictranscoder.New(sess) + + c := m.Run() + server.Close() + os.Exit(c) } -func BenchmarkRESTBuild_Complex_elastictranscoderCreateJobInput(b *testing.B) { - svc := awstesting.NewClient() - svc.ServiceName = "elastictranscoder" - svc.APIVersion = "2012-09-25" +func BenchmarkRESTJSONBuild_Complex_ETCCreateJob(b *testing.B) { + params := elastictranscoderCreateJobInput() - for i := 0; i < b.N; i++ { - r := svc.NewRequest(&request.Operation{Name: "CreateJobInput"}, restjsonBuildParms, nil) - rest.Build(r) - if r.Error != nil { - b.Fatal("Unexpected error", r.Error) - } - } + benchRESTJSONBuild(b, func() *request.Request { + req, _ := elastictranscoderSvc.CreateJobRequest(params) + return req + }) } -func BenchmarkEncodingJSONMarshal_Complex_elastictranscoderCreateJobInput(b *testing.B) { - params := restjsonBuildParms +func BenchmarkRESTJSONBuild_Simple_ETCListJobsByPipeline(b *testing.B) { + params := elastictranscoderListJobsByPipeline() - for i := 0; i < b.N; i++ { - buf := &bytes.Buffer{} - encoder := json.NewEncoder(buf) - if err := encoder.Encode(params); err != nil { - b.Fatal("Unexpected error", err) - } - } + benchRESTJSONBuild(b, func() *request.Request { + req, _ := 
elastictranscoderSvc.ListJobsByPipelineRequest(params) + return req + }) } -func BenchmarkRESTJSONBuild_Simple_elastictranscoderListJobsByPipeline(b *testing.B) { - svc := awstesting.NewClient() - svc.ServiceName = "elastictranscoder" - svc.APIVersion = "2012-09-25" +func BenchmarkRESTJSONRequest_Complex_CFCreateJob(b *testing.B) { + benchRESTJSONRequest(b, func() *request.Request { + req, _ := elastictranscoderSvc.CreateJobRequest(elastictranscoderCreateJobInput()) + return req + }) +} - params := &elastictranscoder.ListJobsByPipelineInput{ - PipelineId: aws.String("Id"), // Required - Ascending: aws.String("Ascending"), - PageToken: aws.String("Id"), - } +func BenchmarkRESTJSONRequest_Simple_ETCListJobsByPipeline(b *testing.B) { + benchRESTJSONRequest(b, func() *request.Request { + req, _ := elastictranscoderSvc.ListJobsByPipelineRequest(elastictranscoderListJobsByPipeline()) + return req + }) +} + +func benchRESTJSONBuild(b *testing.B, reqFn func() *request.Request) { + b.ResetTimer() for i := 0; i < b.N; i++ { - r := svc.NewRequest(&request.Operation{Name: "ListJobsByPipeline"}, params, nil) - restjson.Build(r) - if r.Error != nil { - b.Fatal("Unexpected error", r.Error) + req := reqFn() + restjson.Build(req) + if req.Error != nil { + b.Fatal("Unexpected error", req.Error) } } } -func BenchmarkRESTBuild_Simple_elastictranscoderListJobsByPipeline(b *testing.B) { - svc := awstesting.NewClient() - svc.ServiceName = "elastictranscoder" - svc.APIVersion = "2012-09-25" - - params := &elastictranscoder.ListJobsByPipelineInput{ - PipelineId: aws.String("Id"), // Required - Ascending: aws.String("Ascending"), - PageToken: aws.String("Id"), - } +func benchRESTJSONRequest(b *testing.B, reqFn func() *request.Request) { + b.ResetTimer() for i := 0; i < b.N; i++ { - r := svc.NewRequest(&request.Operation{Name: "ListJobsByPipeline"}, params, nil) - rest.Build(r) - if r.Error != nil { - b.Fatal("Unexpected error", r.Error) + err := reqFn().Send() + if err != nil { + b.Fatal("Unexpected error", err) } } } -func BenchmarkEncodingJSONMarshal_Simple_elastictranscoderListJobsByPipeline(b *testing.B) { - params := &elastictranscoder.ListJobsByPipelineInput{ +func elastictranscoderListJobsByPipeline() *elastictranscoder.ListJobsByPipelineInput { + return &elastictranscoder.ListJobsByPipelineInput{ PipelineId: aws.String("Id"), // Required Ascending: aws.String("Ascending"), PageToken: aws.String("Id"), } - - for i := 0; i < b.N; i++ { - buf := &bytes.Buffer{} - encoder := json.NewEncoder(buf) - if err := encoder.Encode(params); err != nil { - b.Fatal("Unexpected error", err) - } - } } -var restjsonBuildParms = &elastictranscoder.CreateJobInput{ - Input: &elastictranscoder.JobInput{ // Required - AspectRatio: aws.String("AspectRatio"), - Container: aws.String("JobContainer"), - DetectedProperties: &elastictranscoder.DetectedProperties{ - DurationMillis: aws.Int64(1), - FileSize: aws.Int64(1), - FrameRate: aws.String("FloatString"), - Height: aws.Int64(1), - Width: aws.Int64(1), - }, - Encryption: &elastictranscoder.Encryption{ - InitializationVector: aws.String("ZeroTo255String"), - Key: aws.String("Base64EncodedString"), - KeyMd5: aws.String("Base64EncodedString"), - Mode: aws.String("EncryptionMode"), - }, - FrameRate: aws.String("FrameRate"), - Interlaced: aws.String("Interlaced"), - Key: aws.String("Key"), - Resolution: aws.String("Resolution"), - }, - PipelineId: aws.String("Id"), // Required - Output: &elastictranscoder.CreateJobOutput{ - AlbumArt: &elastictranscoder.JobAlbumArt{ - Artwork: 
[]*elastictranscoder.Artwork{ - { // Required - AlbumArtFormat: aws.String("JpgOrPng"), - Encryption: &elastictranscoder.Encryption{ - InitializationVector: aws.String("ZeroTo255String"), - Key: aws.String("Base64EncodedString"), - KeyMd5: aws.String("Base64EncodedString"), - Mode: aws.String("EncryptionMode"), - }, - InputKey: aws.String("WatermarkKey"), - MaxHeight: aws.String("DigitsOrAuto"), - MaxWidth: aws.String("DigitsOrAuto"), - PaddingPolicy: aws.String("PaddingPolicy"), - SizingPolicy: aws.String("SizingPolicy"), - }, - // More values... - }, - MergePolicy: aws.String("MergePolicy"), - }, - Captions: &elastictranscoder.Captions{ - CaptionFormats: []*elastictranscoder.CaptionFormat{ - { // Required - Encryption: &elastictranscoder.Encryption{ - InitializationVector: aws.String("ZeroTo255String"), - Key: aws.String("Base64EncodedString"), - KeyMd5: aws.String("Base64EncodedString"), - Mode: aws.String("EncryptionMode"), - }, - Format: aws.String("CaptionFormatFormat"), - Pattern: aws.String("CaptionFormatPattern"), - }, - // More values... - }, - CaptionSources: []*elastictranscoder.CaptionSource{ - { // Required - Encryption: &elastictranscoder.Encryption{ - InitializationVector: aws.String("ZeroTo255String"), - Key: aws.String("Base64EncodedString"), - KeyMd5: aws.String("Base64EncodedString"), - Mode: aws.String("EncryptionMode"), - }, - Key: aws.String("Key"), - Label: aws.String("Name"), - Language: aws.String("Key"), - TimeOffset: aws.String("TimeOffset"), - }, - // More values... - }, - MergePolicy: aws.String("CaptionMergePolicy"), - }, - Composition: []*elastictranscoder.Clip{ - { // Required - TimeSpan: &elastictranscoder.TimeSpan{ - Duration: aws.String("Time"), - StartTime: aws.String("Time"), - }, +func elastictranscoderCreateJobInput() *elastictranscoder.CreateJobInput { + return &elastictranscoder.CreateJobInput{ + Input: &elastictranscoder.JobInput{ // Required + AspectRatio: aws.String("AspectRatio"), + Container: aws.String("JobContainer"), + DetectedProperties: &elastictranscoder.DetectedProperties{ + DurationMillis: aws.Int64(1), + FileSize: aws.Int64(1), + FrameRate: aws.String("FloatString"), + Height: aws.Int64(1), + Width: aws.Int64(1), }, - // More values... - }, - Encryption: &elastictranscoder.Encryption{ - InitializationVector: aws.String("ZeroTo255String"), - Key: aws.String("Base64EncodedString"), - KeyMd5: aws.String("Base64EncodedString"), - Mode: aws.String("EncryptionMode"), - }, - Key: aws.String("Key"), - PresetId: aws.String("Id"), - Rotate: aws.String("Rotate"), - SegmentDuration: aws.String("FloatString"), - ThumbnailEncryption: &elastictranscoder.Encryption{ - InitializationVector: aws.String("ZeroTo255String"), - Key: aws.String("Base64EncodedString"), - KeyMd5: aws.String("Base64EncodedString"), - Mode: aws.String("EncryptionMode"), - }, - ThumbnailPattern: aws.String("ThumbnailPattern"), - Watermarks: []*elastictranscoder.JobWatermark{ - { // Required - Encryption: &elastictranscoder.Encryption{ - InitializationVector: aws.String("ZeroTo255String"), - Key: aws.String("Base64EncodedString"), - KeyMd5: aws.String("Base64EncodedString"), - Mode: aws.String("EncryptionMode"), - }, - InputKey: aws.String("WatermarkKey"), - PresetWatermarkId: aws.String("PresetWatermarkId"), + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), }, - // More values... 
+ FrameRate: aws.String("FrameRate"), + Interlaced: aws.String("Interlaced"), + Key: aws.String("Key"), + Resolution: aws.String("Resolution"), }, - }, - OutputKeyPrefix: aws.String("Key"), - Outputs: []*elastictranscoder.CreateJobOutput{ - { // Required + PipelineId: aws.String("Id"), // Required + Output: &elastictranscoder.CreateJobOutput{ AlbumArt: &elastictranscoder.JobAlbumArt{ Artwork: []*elastictranscoder.Artwork{ { // Required @@ -320,37 +219,132 @@ var restjsonBuildParms = &elastictranscoder.CreateJobInput{ // More values... }, }, - // More values... - }, - Playlists: []*elastictranscoder.CreateJobPlaylist{ - { // Required - Format: aws.String("PlaylistFormat"), - HlsContentProtection: &elastictranscoder.HlsContentProtection{ - InitializationVector: aws.String("ZeroTo255String"), - Key: aws.String("Base64EncodedString"), - KeyMd5: aws.String("Base64EncodedString"), - KeyStoragePolicy: aws.String("KeyStoragePolicy"), - LicenseAcquisitionUrl: aws.String("ZeroTo512String"), - Method: aws.String("HlsContentProtectionMethod"), - }, - Name: aws.String("Filename"), - OutputKeys: []*string{ - aws.String("Key"), // Required - // More values... + OutputKeyPrefix: aws.String("Key"), + Outputs: []*elastictranscoder.CreateJobOutput{ + { // Required + AlbumArt: &elastictranscoder.JobAlbumArt{ + Artwork: []*elastictranscoder.Artwork{ + { // Required + AlbumArtFormat: aws.String("JpgOrPng"), + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + MaxHeight: aws.String("DigitsOrAuto"), + MaxWidth: aws.String("DigitsOrAuto"), + PaddingPolicy: aws.String("PaddingPolicy"), + SizingPolicy: aws.String("SizingPolicy"), + }, + // More values... + }, + MergePolicy: aws.String("MergePolicy"), + }, + Captions: &elastictranscoder.Captions{ + CaptionFormats: []*elastictranscoder.CaptionFormat{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Format: aws.String("CaptionFormatFormat"), + Pattern: aws.String("CaptionFormatPattern"), + }, + // More values... + }, + CaptionSources: []*elastictranscoder.CaptionSource{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + Label: aws.String("Name"), + Language: aws.String("Key"), + TimeOffset: aws.String("TimeOffset"), + }, + // More values... + }, + MergePolicy: aws.String("CaptionMergePolicy"), + }, + Composition: []*elastictranscoder.Clip{ + { // Required + TimeSpan: &elastictranscoder.TimeSpan{ + Duration: aws.String("Time"), + StartTime: aws.String("Time"), + }, + }, + // More values... 
+ }, + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + Key: aws.String("Key"), + PresetId: aws.String("Id"), + Rotate: aws.String("Rotate"), + SegmentDuration: aws.String("FloatString"), + ThumbnailEncryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + ThumbnailPattern: aws.String("ThumbnailPattern"), + Watermarks: []*elastictranscoder.JobWatermark{ + { // Required + Encryption: &elastictranscoder.Encryption{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + Mode: aws.String("EncryptionMode"), + }, + InputKey: aws.String("WatermarkKey"), + PresetWatermarkId: aws.String("PresetWatermarkId"), + }, + // More values... + }, }, - PlayReadyDrm: &elastictranscoder.PlayReadyDrm{ - Format: aws.String("PlayReadyDrmFormatString"), - InitializationVector: aws.String("ZeroTo255String"), - Key: aws.String("NonEmptyBase64EncodedString"), - KeyId: aws.String("KeyIdGuid"), - KeyMd5: aws.String("NonEmptyBase64EncodedString"), - LicenseAcquisitionUrl: aws.String("OneTo512String"), + // More values... + }, + Playlists: []*elastictranscoder.CreateJobPlaylist{ + { // Required + Format: aws.String("PlaylistFormat"), + HlsContentProtection: &elastictranscoder.HlsContentProtection{ + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("Base64EncodedString"), + KeyMd5: aws.String("Base64EncodedString"), + KeyStoragePolicy: aws.String("KeyStoragePolicy"), + LicenseAcquisitionUrl: aws.String("ZeroTo512String"), + Method: aws.String("HlsContentProtectionMethod"), + }, + Name: aws.String("Filename"), + OutputKeys: []*string{ + aws.String("Key"), // Required + // More values... + }, + PlayReadyDrm: &elastictranscoder.PlayReadyDrm{ + Format: aws.String("PlayReadyDrmFormatString"), + InitializationVector: aws.String("ZeroTo255String"), + Key: aws.String("NonEmptyBase64EncodedString"), + KeyId: aws.String("KeyIdGuid"), + KeyMd5: aws.String("NonEmptyBase64EncodedString"), + LicenseAcquisitionUrl: aws.String("OneTo512String"), + }, }, + // More values... + }, + UserMetadata: map[string]*string{ + "Key": aws.String("String"), // Required + // More values... }, - // More values... - }, - UserMetadata: map[string]*string{ - "Key": aws.String("String"), // Required - // More values... - }, + } } diff --git a/vendor/github.com/onsi/ginkgo/.gitignore b/vendor/github.com/onsi/ginkgo/.gitignore new file mode 100644 index 00000000000..922b4f7f919 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +TODO +tmp/**/* +*.coverprofile \ No newline at end of file diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml new file mode 100644 index 00000000000..19ca78b65fa --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/.travis.yml @@ -0,0 +1,14 @@ +language: go +go: + - 1.5 + - 1.6 + - 1.7 + +install: + - go get -v -t ./... 
+ - go get golang.org/x/tools/cmd/cover + - go get github.com/onsi/gomega + - go install github.com/onsi/ginkgo/ginkgo + - export PATH=$PATH:$HOME/gopath/bin + +script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md new file mode 100644 index 00000000000..5bc46f919e0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md @@ -0,0 +1,137 @@ +## HEAD + +Improvements: + +- `Skip(message)` can be used to skip the current test. +- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests) +- Add `GinkgoRandomSeed()` - shorthand for `config.GinkgoConfig.RandomSeed` + +Bug Fixes: + +- Ginkgo tests now fail when you `panic(nil)` (#167) + +## 1.2.0 5/31/2015 + +Improvements + +- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160) +- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152) +- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166) + +## 1.2.0-beta + +Ginkgo now requires Go 1.4+ + +Improvements: + +- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does. +- Improved focus behavior. Now, this: + + ```golang + FDescribe("Some describe", func() { + It("A", func() {}) + + FIt("B", func() {}) + }) + ``` + + will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests. +- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests. +- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs. +- Improved output when an error occurs in a setup or teardown block. +- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order. +- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter`. +- Add support for precompiled tests: + - `ginkgo build ` will now compile the package, producing a file named `package.test` + - The compiled `package.test` file can be run directly. This runs the tests in series. + - To run precompiled tests in parallel, you can run: `ginkgo -p package.test` +- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs. +- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory +- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump. +- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+. +- `ginkgo -notify` now works on Linux + +Bug Fixes: + +- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0. +- Fix tempfile leak when running in parallel +- Fix incorrect failure message when a panic occurs during a parallel test run +- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests. 
+- Be more consistent about handling SIGTERM as well as SIGINT
+- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
+- Fixed a long-standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
+
+## 1.1.0 (8/2/2014)
+
+No changes, just dropping the beta.
+
+## 1.1.0-beta (7/22/2014)
+New Features:
+
+- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag.
+- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites.
+- `ginkgo -p` runs the test suite in parallel with an auto-detected number of nodes.
+- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
+- `ginkgo --failFast` aborts the test suite after the first failure.
+- `ginkgo generate file_1 file_2` can take multiple file arguments.
+- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
+- `ginkgo --randomizeSuites` will run test *suites* in random order using the generated/passed-in seed.
+
+Improvements:
+
+- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
+- `ginkgo --untilItFails` no longer recompiles between attempts.
+- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed.
+
+Bug Fixes:
+
+- `ginkgo bootstrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
+- Parallel specs are now better distributed across nodes -- fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic.
+
+## 1.0.0 (5/24/2014)
+New Features:
+
+- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`
+
+Improvements:
+
+- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm to open the file in your text editor.
+- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.
+
+Bug Fixes:
+
+- `-cover` now generates a correctly combined coverprofile when running in parallel with multiple `-node`s.
+- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
+- Fix all remaining race conditions in Ginkgo's test suite.
+
+## 1.0.0-beta (4/14/2014)
+Breaking changes:
+
+- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead.
+- Modified the Reporter interface
+- `watch` is now a subcommand, not a flag.
+
+DSL changes:
+
+- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
+- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
+- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.
+
+CLI changes:
+
+- `watch` is now a subcommand, not a flag
+- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega.
Refreshing this list can be done by running `ginkgo nodot` +- Additional arguments can be passed to specs. Pass them after the `--` separator +- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp. +- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs. + +Misc: + +- Start using semantic versioning +- Start maintaining changelog + +Major refactor: + +- Pull out Ginkgo's internal to `internal` +- Rename `example` everywhere to `spec` +- Much more! diff --git a/vendor/github.com/onsi/ginkgo/LICENSE b/vendor/github.com/onsi/ginkgo/LICENSE new file mode 100644 index 00000000000..9415ee72c17 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md new file mode 100644 index 00000000000..50d32ba1c59 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/README.md @@ -0,0 +1,115 @@ +![Ginkgo: A Golang BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png) + +[![Build Status](https://travis-ci.org/onsi/ginkgo.svg)](https://travis-ci.org/onsi/ginkgo) + +Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)! + +To discuss Ginkgo and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega). + +## Feature List + +- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite) + +- Structure your BDD-style tests expressively: + - Nestable [`Describe` and `Context` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context) + - [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown + - [`It` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions + - [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern). 
+ - [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and clean up after a suite.
+
+- A comprehensive test runner that lets you:
+ - Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs)
+ - [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
+ - Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
+ - Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs)
+
+- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
+ - `ginkgo -nodes=N` runs your tests in `N` parallel processes and prints out coherent output in realtime
+ - `ginkgo -cover` runs your tests using Golang's code coverage tool
+ - `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
+ - `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
+ - `ginkgo -r` runs all test suites under the current directory
+ - `ginkgo -v` prints out identifying information for each test just before it runs
+
+ And much more: run `ginkgo help` for details!
+
+ The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test`
+
+- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop!
+
+- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests)
+
+- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code.
+
+- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.
+
+- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details.
+
+- A modular architecture that lets you easily:
+ - Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
+ - [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
+
+## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
+
+Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/)
+
+## [Agouti](http://github.com/sclevine/agouti): A Golang Acceptance Testing Framework
+
+Agouti allows you to run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org)
+
+## Set Me Up!
+
+You'll need Golang v1.3+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)
+ +```bash + +go get github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI +go get github.com/onsi/gomega # fetches the matcher library + +cd path/to/package/you/want/to/test + +ginkgo bootstrap # set up a new ginkgo suite +ginkgo generate # will create a sample test file. edit this file and add your tests then... + +go test # to run your tests + +ginkgo # also runs your tests + +``` + +## I'm new to Go: What are my testing options? + +Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set. + +With that said, it's great to know what your options are :) + +### What Golang gives you out of the box + +Testing is a first class citizen in Golang, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library. + +### Matcher libraries for Golang's XUnit style tests + +A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction: + +- [testify](https://github.com/stretchr/testify) +- [gocheck](http://labix.org/gocheck) + +You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests) + +### BDD style testing frameworks + +There are a handful of BDD-style testing frameworks written for Golang. Here are a few: + +- [Ginkgo](https://github.com/onsi/ginkgo) ;) +- [GoConvey](https://github.com/smartystreets/goconvey) +- [Goblin](https://github.com/franela/goblin) +- [Mao](https://github.com/azer/mao) +- [Zen](https://github.com/pranavraja/zen) + +Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of golang testing libraries. + +Go explore! + +## License + +Ginkgo is MIT-Licensed diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go new file mode 100644 index 00000000000..4b5485b4157 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/config/config.go @@ -0,0 +1,187 @@ +/* +Ginkgo accepts a number of configuration options. + +These are documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli) + +You can also learn more via + + ginkgo help + +or (I kid you not): + + go test -asdf +*/ +package config + +import ( + "flag" + "time" + + "fmt" +) + +const VERSION = "1.2.0" + +type GinkgoConfigType struct { + RandomSeed int64 + RandomizeAllSpecs bool + RegexScansFilePath bool + FocusString string + SkipString string + SkipMeasurements bool + FailOnPending bool + FailFast bool + FlakeAttempts int + EmitSpecProgress bool + DryRun bool + + ParallelNode int + ParallelTotal int + SyncHost string + StreamHost string +} + +var GinkgoConfig = GinkgoConfigType{} + +type DefaultReporterConfigType struct { + NoColor bool + SlowSpecThreshold float64 + NoisyPendings bool + Succinct bool + Verbose bool + FullTrace bool +} + +var DefaultReporterConfig = DefaultReporterConfigType{} + +func processPrefix(prefix string) string { + if prefix != "" { + prefix = prefix + "." 
+ } + return prefix +} + +func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { + prefix = processPrefix(prefix) + flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.") + flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe/Context groups.") + flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.") + flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.") + flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.") + + flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.") + + flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.") + flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.") + + flagSet.BoolVar(&(GinkgoConfig.RegexScansFilePath), prefix+"regexScansFilePath", false, "If set, ginkgo regex matching also will look at the file path (code location).") + + flagSet.IntVar(&(GinkgoConfig.FlakeAttempts), prefix+"flakeAttempts", 1, "Make up to this many attempts to run each spec. Please note that if any of the attempts succeed, the suite will not be failed. But any failures will still be recorded.") + + flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.") + + if includeParallelFlags { + flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.") + flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. 
For running specs in parallel.") + flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.") + flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.") + } + + flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.") + flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.") + flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.") + flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.") + flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report") + flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs") +} + +func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string { + prefix = processPrefix(prefix) + result := make([]string, 0) + + if ginkgo.RandomSeed > 0 { + result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed)) + } + + if ginkgo.RandomizeAllSpecs { + result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix)) + } + + if ginkgo.SkipMeasurements { + result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix)) + } + + if ginkgo.FailOnPending { + result = append(result, fmt.Sprintf("--%sfailOnPending", prefix)) + } + + if ginkgo.FailFast { + result = append(result, fmt.Sprintf("--%sfailFast", prefix)) + } + + if ginkgo.DryRun { + result = append(result, fmt.Sprintf("--%sdryRun", prefix)) + } + + if ginkgo.FocusString != "" { + result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString)) + } + + if ginkgo.SkipString != "" { + result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString)) + } + + if ginkgo.FlakeAttempts > 1 { + result = append(result, fmt.Sprintf("--%sflakeAttempts=%d", prefix, ginkgo.FlakeAttempts)) + } + + if ginkgo.EmitSpecProgress { + result = append(result, fmt.Sprintf("--%sprogress", prefix)) + } + + if ginkgo.ParallelNode != 0 { + result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode)) + } + + if ginkgo.ParallelTotal != 0 { + result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal)) + } + + if ginkgo.StreamHost != "" { + result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost)) + } + + if ginkgo.SyncHost != "" { + result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost)) + } + + if ginkgo.RegexScansFilePath { + result = append(result, fmt.Sprintf("--%sregexScansFilePath", prefix)) + } + + if reporter.NoColor { + result = append(result, fmt.Sprintf("--%snoColor", prefix)) + } + + if reporter.SlowSpecThreshold > 0 { + result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold)) + } + + if !reporter.NoisyPendings { + result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix)) + } + + if reporter.Verbose { + result = 
append(result, fmt.Sprintf("--%sv", prefix)) + } + + if reporter.Succinct { + result = append(result, fmt.Sprintf("--%ssuccinct", prefix)) + } + + if reporter.FullTrace { + result = append(result, fmt.Sprintf("--%strace", prefix)) + } + + return result +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go new file mode 100644 index 00000000000..8fe0b70a62f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go @@ -0,0 +1,569 @@ +/* +Ginkgo is a BDD-style testing framework for Golang + +The godoc documentation describes Ginkgo's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo/ + +Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega) + +Ginkgo on Github: http://github.com/onsi/ginkgo + +Ginkgo is MIT-Licensed +*/ +package ginkgo + +import ( + "flag" + "fmt" + "io" + "net/http" + "os" + "strings" + "time" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/internal/remote" + "github.com/onsi/ginkgo/internal/suite" + "github.com/onsi/ginkgo/internal/testingtproxy" + "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/reporters/stenographer" + "github.com/onsi/ginkgo/types" +) + +const GINKGO_VERSION = config.VERSION +const GINKGO_PANIC = ` +Your test failed. +Ginkgo panics to prevent subsequent assertions from running. +Normally Ginkgo rescues this panic so you shouldn't see it. + +But, if you make an assertion in a goroutine, Ginkgo can't capture the panic. +To circumvent this, you should call + + defer GinkgoRecover() + +at the top of the goroutine that caused this panic. +` +const defaultTimeout = 1 + +var globalSuite *suite.Suite +var globalFailer *failer.Failer + +func init() { + config.Flags(flag.CommandLine, "ginkgo", true) + GinkgoWriter = writer.New(os.Stdout) + globalFailer = failer.New() + globalSuite = suite.New(globalFailer) +} + +//GinkgoWriter implements an io.Writer +//When running in verbose mode any writes to GinkgoWriter will be immediately printed +//to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen +//only if the current test fails. +var GinkgoWriter io.Writer + +//The interface by which Ginkgo receives *testing.T +type GinkgoTestingT interface { + Fail() +} + +//GinkgoRandomSeed returns the seed used to randomize spec execution order. It is +//useful for seeding your own pseudorandom number generators (PRNGs) to ensure +//consistent executions from run to run, where your tests contain variability (for +//example, when selecting random test data). +func GinkgoRandomSeed() int64 { + return config.GinkgoConfig.RandomSeed +} + +//GinkgoParallelNode returns the parallel node number for the current ginkgo process +//The node number is 1-indexed +func GinkgoParallelNode() int { + return config.GinkgoConfig.ParallelNode +} + +//Some matcher libraries or legacy codebases require a *testing.T +//GinkgoT implements an interface analogous to *testing.T and can be used if +//the library in question accepts *testing.T through an interface +// +// For example, with testify: +// assert.Equal(GinkgoT(), 123, 123, "they should be equal") +// +// Or with gomock: +// gomock.NewController(GinkgoT()) +// +// GinkgoT() takes an optional offset argument that can be used to get the +// correct line number associated with the failure. 
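+//
+// The following sketch is illustrative only and is not part of the upstream source;
+// it assumes testify's assert package is imported and simply places the one-liner
+// shown above inside a complete spec:
+//
+//  var _ = Describe("using testify with Ginkgo", func() {
+//      It("accepts GinkgoT() where a *testing.T-like value is expected", func() {
+//          assert.Equal(GinkgoT(), 123, 123, "they should be equal")
+//      })
+//  })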
+func GinkgoT(optionalOffset ...int) GinkgoTInterface { + offset := 3 + if len(optionalOffset) > 0 { + offset = optionalOffset[0] + } + return testingtproxy.New(GinkgoWriter, Fail, offset) +} + +//The interface returned by GinkgoT(). This covers most of the methods +//in the testing package's T. +type GinkgoTInterface interface { + Fail() + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + FailNow() + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Failed() bool + Parallel() + Skip(args ...interface{}) + Skipf(format string, args ...interface{}) + SkipNow() + Skipped() bool +} + +//Custom Ginkgo test reporters must implement the Reporter interface. +// +//The custom reporter is passed in a SuiteSummary when the suite begins and ends, +//and a SpecSummary just before a spec begins and just after a spec ends +type Reporter reporters.Reporter + +//Asynchronous specs are given a channel of the Done type. You must close or write to the channel +//to tell Ginkgo that your async test is done. +type Done chan<- interface{} + +//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription +// FullTestText: a concatenation of ComponentTexts and the TestText +// ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test +// TestText: the text in the actual It or Measure node +// IsMeasurement: true if the current test is a measurement +// FileName: the name of the file containing the current test +// LineNumber: the line number for the current test +// Failed: if the current test has failed, this will be true (useful in an AfterEach) +type GinkgoTestDescription struct { + FullTestText string + ComponentTexts []string + TestText string + + IsMeasurement bool + + FileName string + LineNumber int + + Failed bool +} + +//CurrentGinkgoTestDescripton returns information about the current running test. +func CurrentGinkgoTestDescription() GinkgoTestDescription { + summary, ok := globalSuite.CurrentRunningSpecSummary() + if !ok { + return GinkgoTestDescription{} + } + + subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1] + + return GinkgoTestDescription{ + ComponentTexts: summary.ComponentTexts[1:], + FullTestText: strings.Join(summary.ComponentTexts[1:], " "), + TestText: summary.ComponentTexts[len(summary.ComponentTexts)-1], + IsMeasurement: summary.IsMeasurement, + FileName: subjectCodeLocation.FileName, + LineNumber: subjectCodeLocation.LineNumber, + Failed: summary.HasFailureState(), + } +} + +//Measurement tests receive a Benchmarker. +// +//You use the Time() function to time how long the passed in body function takes to run +//You use the RecordValue() function to track arbitrary numerical measurements. +//The RecordValueWithPrecision() function can be used alternatively to provide the unit +//and resolution of the numeric measurement. +//The optional info argument is passed to the test reporter and can be used to +// provide the measurement data to a custom reporter with context. 
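+//
+//An illustrative sketch (not part of the upstream source) of a Measure body; the
+//sortSomething() helper and the 0.2 second budget are hypothetical stand-ins:
+//
+//  Measure("it sorts quickly", func(b Benchmarker) {
+//      runtime := b.Time("sorting", func() {
+//          sortSomething()
+//      })
+//      Ω(runtime.Seconds()).Should(BeNumerically("<", 0.2), "sorting shouldn't take too long")
+//      b.RecordValue("items sorted", 1000)
+//  }, 10) // samples: the body runs 10 times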
+// +//See http://onsi.github.io/ginkgo/#benchmark_tests for more details +type Benchmarker interface { + Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) + RecordValue(name string, value float64, info ...interface{}) + RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) +} + +//RunSpecs is the entry point for the Ginkgo test runner. +//You must call this within a Golang testing TestX(t *testing.T) function. +// +//To bootstrap a test suite you can use the Ginkgo CLI: +// +// ginkgo bootstrap +func RunSpecs(t GinkgoTestingT, description string) bool { + specReporters := []Reporter{buildDefaultReporter()} + return RunSpecsWithCustomReporters(t, description, specReporters) +} + +//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace +//RunSpecs() with this method. +func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { + specReporters = append([]Reporter{buildDefaultReporter()}, specReporters...) + return RunSpecsWithCustomReporters(t, description, specReporters) +} + +//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace +//RunSpecs() with this method. Note that parallel tests will not work correctly without the default reporter +func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { + writer := GinkgoWriter.(*writer.Writer) + writer.SetStream(config.DefaultReporterConfig.Verbose) + reporters := make([]reporters.Reporter, len(specReporters)) + for i, reporter := range specReporters { + reporters[i] = reporter + } + passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig) + if passed && hasFocusedTests { + fmt.Println("PASS | FOCUSED") + os.Exit(types.GINKGO_FOCUS_EXIT_CODE) + } + return passed +} + +func buildDefaultReporter() Reporter { + remoteReportingServer := config.GinkgoConfig.StreamHost + if remoteReportingServer == "" { + stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1) + return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer) + } else { + return remote.NewForwardingReporter(remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor()) + } +} + +//Skip notifies Ginkgo that the current spec should be skipped. +func Skip(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + + globalFailer.Skip(message, codelocation.New(skip+1)) + panic(GINKGO_PANIC) +} + +//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.) +func Fail(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + + globalFailer.Fail(message, codelocation.New(skip+1)) + panic(GINKGO_PANIC) +} + +//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail` +//Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that +//calls out to Gomega +// +//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent +//further assertions from running. This panic must be recovered. Ginkgo does this for you +//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...) 
+//
+//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no
+//way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
+func GinkgoRecover() {
+	e := recover()
+	if e != nil {
+		globalFailer.Panic(codelocation.New(1), e)
+	}
+}
+
+//Describe blocks allow you to organize your specs. A Describe block can contain any number of
+//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
+//
+//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally
+//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
+//or method and, within that Describe, outline a number of Contexts.
+func Describe(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
+	return true
+}
+
+//You can focus the tests within a describe block using FDescribe
+func FDescribe(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
+	return true
+}
+
+//You can mark the tests within a describe block as pending using PDescribe
+func PDescribe(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+	return true
+}
+
+//You can mark the tests within a describe block as pending using XDescribe
+func XDescribe(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+	return true
+}
+
+//Context blocks allow you to organize your specs. A Context block can contain any number of
+//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
+//
+//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally
+//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
+//or method and, within that Describe, outline a number of Contexts.
+func Context(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
+	return true
+}
+
+//You can focus the tests within a describe block using FContext
+func FContext(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
+	return true
+}
+
+//You can mark the tests within a describe block as pending using PContext
+func PContext(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+	return true
+}
+
+//You can mark the tests within a describe block as pending using XContext
+func XContext(text string, body func()) bool {
+	globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+	return true
+}
+
+//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks
+//within an It block.
+//
+//Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a
+//function that accepts a Done channel. When you do this, you can also provide an optional timeout.
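+//
+//An illustrative sketch (not part of the upstream source) of an asynchronous It;
+//the resultChan channel and the 0.5 second timeout are hypothetical:
+//
+//  It("receives a result before the deadline", func(done Done) {
+//      go func() {
+//          defer GinkgoRecover()
+//          Ω(<-resultChan).Should(Equal("ok"))
+//          close(done)
+//      }()
+//  }, 0.5)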
+func It(text string, body interface{}, timeout ...float64) bool { + globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//You can focus individual Its using FIt +func FIt(text string, body interface{}, timeout ...float64) bool { + globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//You can mark Its as pending using PIt +func PIt(text string, _ ...interface{}) bool { + globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) + return true +} + +//You can mark Its as pending using XIt +func XIt(text string, _ ...interface{}) bool { + globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) + return true +} + +//Specify blocks are aliases for It blocks and allow for more natural wording in situations +//which "It" does not fit into a natural sentence flow. All the same protocols apply for Specify blocks +//which apply to It blocks. +func Specify(text string, body interface{}, timeout ...float64) bool { + return It(text, body, timeout...) +} + +//You can focus individual Specifys using FSpecify +func FSpecify(text string, body interface{}, timeout ...float64) bool { + return FIt(text, body, timeout...) +} + +//You can mark Specifys as pending using PSpecify +func PSpecify(text string, is ...interface{}) bool { + return PIt(text, is...) +} + +//You can mark Specifys as pending using XSpecify +func XSpecify(text string, is ...interface{}) bool { + return XIt(text, is...) +} + +//By allows you to better document large Its. +// +//Generally you should try to keep your Its short and to the point. This is not always possible, however, +//especially in the context of integration tests that capture a particular workflow. +// +//By allows you to document such flows. By must be called within a runnable node (It, BeforeEach, Measure, etc...) +//By will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function. +func By(text string, callbacks ...func()) { + preamble := "\x1b[1mSTEP\x1b[0m" + if config.DefaultReporterConfig.NoColor { + preamble = "STEP" + } + fmt.Fprintln(GinkgoWriter, preamble+": "+text) + if len(callbacks) == 1 { + callbacks[0]() + } + if len(callbacks) > 1 { + panic("just one callback per By, please") + } +} + +//Measure blocks run the passed in body function repeatedly (determined by the samples argument) +//and accumulate metrics provided to the Benchmarker by the body function. 
+//
+//The body function must have the signature:
+//	func(b Benchmarker)
+func Measure(text string, body interface{}, samples int) bool {
+	globalSuite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples)
+	return true
+}
+
+//You can focus individual Measures using FMeasure
+func FMeasure(text string, body interface{}, samples int) bool {
+	globalSuite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples)
+	return true
+}
+
+//You can mark Measurements as pending using PMeasure
+func PMeasure(text string, _ ...interface{}) bool {
+	globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
+	return true
+}
+
+//You can mark Measurements as pending using XMeasure
+func XMeasure(text string, _ ...interface{}) bool {
+	globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
+	return true
+}
+
+//BeforeSuite blocks are run just once before any specs are run. When running in parallel, each
+//parallel node process will call BeforeSuite.
+//
+//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
+//
+//You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level.
+func BeforeSuite(body interface{}, timeout ...float64) bool {
+	globalSuite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
+	return true
+}
+
+//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed.
+//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting.
+//
+//When running in parallel, each parallel node process will call AfterSuite.
+//
+//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
+//
+//You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level.
+func AfterSuite(body interface{}, timeout ...float64) bool {
+	globalSuite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
+	return true
+}
+
+//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across
+//nodes when running tests in parallel. For example, say you have a shared database that you can only start one instance of that
+//must be used in your tests. When running in parallel, only one node should set up the database and all other nodes should wait
+//until that node is done before running.
+//
+//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is
+//run on all nodes, but *only* after the first function completes successfully. Ginkgo also makes it possible to send data from the first function (on Node 1)
+//to the second function (on all the other nodes).
+//
+//The functions have the following signatures.
The first function (which only runs on node 1) has the signature: +// +// func() []byte +// +//or, to run asynchronously: +// +// func(done Done) []byte +// +//The byte array returned by the first function is then passed to the second function, which has the signature: +// +// func(data []byte) +// +//or, to run asynchronously: +// +// func(data []byte, done Done) +// +//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes: +// +// var dbClient db.Client +// var dbRunner db.Runner +// +// var _ = SynchronizedBeforeSuite(func() []byte { +// dbRunner = db.NewRunner() +// err := dbRunner.Start() +// Ω(err).ShouldNot(HaveOccurred()) +// return []byte(dbRunner.URL) +// }, func(data []byte) { +// dbClient = db.NewClient() +// err := dbClient.Connect(string(data)) +// Ω(err).ShouldNot(HaveOccurred()) +// }) +func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool { + globalSuite.SetSynchronizedBeforeSuiteNode( + node1Body, + allNodesBody, + codelocation.New(1), + parseTimeout(timeout...), + ) + return true +} + +//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up +//external singleton resources shared across nodes when running tests in parallel. +// +//SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all nodes. The second runs only on parallel node #1 +//and *only* after all other nodes have finished and exited. This ensures that node 1, and any resources it is running, remain alive until +//all other nodes are finished. +// +//Both functions have the same signature: either func() or func(done Done) to run asynchronously. +// +//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite. Here, SynchronizedAfterSuite is used to tear down the shared database +//only after all nodes have finished: +// +// var _ = SynchronizedAfterSuite(func() { +// dbClient.Cleanup() +// }, func() { +// dbRunner.Stop() +// }) +func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool { + globalSuite.SetSynchronizedAfterSuiteNode( + allNodesBody, + node1Body, + codelocation.New(1), + parseTimeout(timeout...), + ) + return true +} + +//BeforeEach blocks are run before It blocks. When multiple BeforeEach blocks are defined in nested +//Describe and Context blocks the outermost BeforeEach blocks are run first. +// +//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts +//a Done channel +func BeforeEach(body interface{}, timeout ...float64) bool { + globalSuite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks. For more details, +//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_) +// +//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts +//a Done channel +func JustBeforeEach(body interface{}, timeout ...float64) bool { + globalSuite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested +//Describe and Context blocks the innermost AfterEach blocks are run first. 
+// +//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts +//a Done channel +func AfterEach(body interface{}, timeout ...float64) bool { + globalSuite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +func parseTimeout(timeout ...float64) time.Duration { + if len(timeout) == 0 { + return time.Duration(defaultTimeout * int64(time.Second)) + } else { + return time.Duration(timeout[0] * float64(time.Second)) + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go new file mode 100644 index 00000000000..fa2f0bf730c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go @@ -0,0 +1,32 @@ +package codelocation + +import ( + "regexp" + "runtime" + "runtime/debug" + "strings" + + "github.com/onsi/ginkgo/types" +) + +func New(skip int) types.CodeLocation { + _, file, line, _ := runtime.Caller(skip + 1) + stackTrace := PruneStack(string(debug.Stack()), skip) + return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace} +} + +func PruneStack(fullStackTrace string, skip int) string { + stack := strings.Split(fullStackTrace, "\n") + if len(stack) > 2*(skip+1) { + stack = stack[2*(skip+1):] + } + prunedStack := []string{} + re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) + for i := 0; i < len(stack)/2; i++ { + if !re.Match([]byte(stack[i*2])) { + prunedStack = append(prunedStack, stack[i*2]) + prunedStack = append(prunedStack, stack[i*2+1]) + } + } + return strings.Join(prunedStack, "\n") +} diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_suite_test.go new file mode 100644 index 00000000000..f06abf3c560 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_suite_test.go @@ -0,0 +1,13 @@ +package codelocation_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestCodelocation(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "CodeLocation Suite") +} diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_test.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_test.go new file mode 100644 index 00000000000..55a9e9d036c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_test.go @@ -0,0 +1,79 @@ +package codelocation_test + +import ( + . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/types" + . 
"github.com/onsi/gomega" + "runtime" +) + +var _ = Describe("CodeLocation", func() { + var ( + codeLocation types.CodeLocation + expectedFileName string + expectedLineNumber int + ) + + caller0 := func() { + codeLocation = codelocation.New(1) + } + + caller1 := func() { + _, expectedFileName, expectedLineNumber, _ = runtime.Caller(0) + expectedLineNumber += 2 + caller0() + } + + BeforeEach(func() { + caller1() + }) + + It("should use the passed in skip parameter to pick out the correct file & line number", func() { + Ω(codeLocation.FileName).Should(Equal(expectedFileName)) + Ω(codeLocation.LineNumber).Should(Equal(expectedLineNumber)) + }) + + Describe("stringer behavior", func() { + It("should stringify nicely", func() { + Ω(codeLocation.String()).Should(ContainSubstring("code_location_test.go:%d", expectedLineNumber)) + }) + }) + + //There's no better way than to test this private method as it + //goes out of its way to prune out ginkgo related code in the stack trace + Describe("PruneStack", func() { + It("should remove any references to ginkgo and pkg/testing and pkg/runtime", func() { + input := `/Skip/me +Skip: skip() +/Skip/me +Skip: skip() +/Users/whoever/gospace/src/github.com/onsi/ginkgo/whatever.go:10 (0x12314) +Something: Func() +/Users/whoever/gospace/src/github.com/onsi/ginkgo/whatever_else.go:10 (0x12314) +SomethingInternalToGinkgo: Func() +/usr/goroot/pkg/strings/oops.go:10 (0x12341) +Oops: BlowUp() +/Users/whoever/gospace/src/mycode/code.go:10 (0x12341) +MyCode: Func() +/Users/whoever/gospace/src/mycode/code_test.go:10 (0x12341) +MyCodeTest: Func() +/Users/whoever/gospace/src/mycode/code_suite_test.go:12 (0x37f08) +TestFoo: RunSpecs(t, "Foo Suite") +/usr/goroot/pkg/testing/testing.go:12 (0x37f08) +TestingT: Blah() +/usr/goroot/pkg/runtime/runtime.go:12 (0x37f08) +Something: Func() +` + prunedStack := codelocation.PruneStack(input, 1) + Ω(prunedStack).Should(Equal(`/usr/goroot/pkg/strings/oops.go:10 (0x12341) +Oops: BlowUp() +/Users/whoever/gospace/src/mycode/code.go:10 (0x12341) +MyCode: Func() +/Users/whoever/gospace/src/mycode/code_test.go:10 (0x12341) +MyCodeTest: Func() +/Users/whoever/gospace/src/mycode/code_suite_test.go:12 (0x37f08) +TestFoo: RunSpecs(t, "Foo Suite")`)) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go new file mode 100644 index 00000000000..0737746dcfe --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go @@ -0,0 +1,151 @@ +package containernode + +import ( + "math/rand" + "sort" + + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/types" +) + +type subjectOrContainerNode struct { + containerNode *ContainerNode + subjectNode leafnodes.SubjectNode +} + +func (n subjectOrContainerNode) text() string { + if n.containerNode != nil { + return n.containerNode.Text() + } else { + return n.subjectNode.Text() + } +} + +type CollatedNodes struct { + Containers []*ContainerNode + Subject leafnodes.SubjectNode +} + +type ContainerNode struct { + text string + flag types.FlagType + codeLocation types.CodeLocation + + setupNodes []leafnodes.BasicNode + subjectAndContainerNodes []subjectOrContainerNode +} + +func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode { + return &ContainerNode{ + text: text, + flag: flag, + codeLocation: codeLocation, + } +} + +func (container *ContainerNode) Shuffle(r *rand.Rand) { + sort.Sort(container) + permutation := 
r.Perm(len(container.subjectAndContainerNodes)) + shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes)) + for i, j := range permutation { + shuffledNodes[i] = container.subjectAndContainerNodes[j] + } + container.subjectAndContainerNodes = shuffledNodes +} + +func (node *ContainerNode) BackPropagateProgrammaticFocus() bool { + if node.flag == types.FlagTypePending { + return false + } + + shouldUnfocus := false + for _, subjectOrContainerNode := range node.subjectAndContainerNodes { + if subjectOrContainerNode.containerNode != nil { + shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus + } else { + shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus + } + } + + if shouldUnfocus { + if node.flag == types.FlagTypeFocused { + node.flag = types.FlagTypeNone + } + return true + } + + return node.flag == types.FlagTypeFocused +} + +func (node *ContainerNode) Collate() []CollatedNodes { + return node.collate([]*ContainerNode{}) +} + +func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes { + collated := make([]CollatedNodes, 0) + + containers := make([]*ContainerNode, len(enclosingContainers)) + copy(containers, enclosingContainers) + containers = append(containers, node) + + for _, subjectOrContainer := range node.subjectAndContainerNodes { + if subjectOrContainer.containerNode != nil { + collated = append(collated, subjectOrContainer.containerNode.collate(containers)...) + } else { + collated = append(collated, CollatedNodes{ + Containers: containers, + Subject: subjectOrContainer.subjectNode, + }) + } + } + + return collated +} + +func (node *ContainerNode) PushContainerNode(container *ContainerNode) { + node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container}) +} + +func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) { + node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject}) +} + +func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) { + node.setupNodes = append(node.setupNodes, setupNode) +} + +func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode { + nodes := []leafnodes.BasicNode{} + for _, setupNode := range node.setupNodes { + if setupNode.Type() == nodeType { + nodes = append(nodes, setupNode) + } + } + return nodes +} + +func (node *ContainerNode) Text() string { + return node.text +} + +func (node *ContainerNode) CodeLocation() types.CodeLocation { + return node.codeLocation +} + +func (node *ContainerNode) Flag() types.FlagType { + return node.flag +} + +//sort.Interface + +func (node *ContainerNode) Len() int { + return len(node.subjectAndContainerNodes) +} + +func (node *ContainerNode) Less(i, j int) bool { + return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text() +} + +func (node *ContainerNode) Swap(i, j int) { + node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i] +} diff --git a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_suite_test.go new file mode 100644 index 00000000000..c6fc314ff57 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_suite_test.go 
@@ -0,0 +1,13 @@ +package containernode_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestContainernode(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Containernode Suite") +} diff --git a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_test.go b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_test.go new file mode 100644 index 00000000000..b8d61b13fc2 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_test.go @@ -0,0 +1,212 @@ +package containernode_test + +import ( + "github.com/onsi/ginkgo/internal/leafnodes" + "math/rand" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/onsi/ginkgo/internal/codelocation" + . "github.com/onsi/ginkgo/internal/containernode" + "github.com/onsi/ginkgo/types" +) + +var _ = Describe("Container Node", func() { + var ( + codeLocation types.CodeLocation + container *ContainerNode + ) + + BeforeEach(func() { + codeLocation = codelocation.New(0) + container = New("description text", types.FlagTypeFocused, codeLocation) + }) + + Describe("creating a container node", func() { + It("can answer questions about itself", func() { + Ω(container.Text()).Should(Equal("description text")) + Ω(container.Flag()).Should(Equal(types.FlagTypeFocused)) + Ω(container.CodeLocation()).Should(Equal(codeLocation)) + }) + }) + + Describe("pushing setup nodes", func() { + It("can append setup nodes of various types and fetch them by type", func() { + befA := leafnodes.NewBeforeEachNode(func() {}, codelocation.New(0), 0, nil, 0) + befB := leafnodes.NewBeforeEachNode(func() {}, codelocation.New(0), 0, nil, 0) + aftA := leafnodes.NewAfterEachNode(func() {}, codelocation.New(0), 0, nil, 0) + aftB := leafnodes.NewAfterEachNode(func() {}, codelocation.New(0), 0, nil, 0) + jusBefA := leafnodes.NewJustBeforeEachNode(func() {}, codelocation.New(0), 0, nil, 0) + jusBefB := leafnodes.NewJustBeforeEachNode(func() {}, codelocation.New(0), 0, nil, 0) + + container.PushSetupNode(befA) + container.PushSetupNode(befB) + container.PushSetupNode(aftA) + container.PushSetupNode(aftB) + container.PushSetupNode(jusBefA) + container.PushSetupNode(jusBefB) + + subject := leafnodes.NewItNode("subject", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0) + container.PushSubjectNode(subject) + + Ω(container.SetupNodesOfType(types.SpecComponentTypeBeforeEach)).Should(Equal([]leafnodes.BasicNode{befA, befB})) + Ω(container.SetupNodesOfType(types.SpecComponentTypeAfterEach)).Should(Equal([]leafnodes.BasicNode{aftA, aftB})) + Ω(container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach)).Should(Equal([]leafnodes.BasicNode{jusBefA, jusBefB})) + Ω(container.SetupNodesOfType(types.SpecComponentTypeIt)).Should(BeEmpty()) //subjects are not setup nodes + }) + }) + + Context("With appended containers and subject nodes", func() { + var ( + itA, itB, innerItA, innerItB leafnodes.SubjectNode + innerContainer *ContainerNode + ) + + BeforeEach(func() { + itA = leafnodes.NewItNode("Banana", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0) + itB = leafnodes.NewItNode("Apple", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0) + + innerItA = leafnodes.NewItNode("inner A", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0) + innerItB = leafnodes.NewItNode("inner B", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0) + + innerContainer = New("Orange", types.FlagTypeNone, codelocation.New(0)) + + 
container.PushSubjectNode(itA) + container.PushContainerNode(innerContainer) + innerContainer.PushSubjectNode(innerItA) + innerContainer.PushSubjectNode(innerItB) + container.PushSubjectNode(itB) + }) + + Describe("Collating", func() { + It("should return a collated set of containers and subject nodes in the correct order", func() { + collated := container.Collate() + Ω(collated).Should(HaveLen(4)) + + Ω(collated[0]).Should(Equal(CollatedNodes{ + Containers: []*ContainerNode{container}, + Subject: itA, + })) + + Ω(collated[1]).Should(Equal(CollatedNodes{ + Containers: []*ContainerNode{container, innerContainer}, + Subject: innerItA, + })) + + Ω(collated[2]).Should(Equal(CollatedNodes{ + Containers: []*ContainerNode{container, innerContainer}, + Subject: innerItB, + })) + + Ω(collated[3]).Should(Equal(CollatedNodes{ + Containers: []*ContainerNode{container}, + Subject: itB, + })) + }) + }) + + Describe("Backpropagating Programmatic Focus", func() { + //This allows inner focused specs to override the focus of outer focussed + //specs and more closely maps to what a developer wants to happen + //when debugging a test suite + + Context("when a parent is focused *and* an inner subject is focused", func() { + BeforeEach(func() { + container = New("description text", types.FlagTypeFocused, codeLocation) + itA = leafnodes.NewItNode("A", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0) + container.PushSubjectNode(itA) + + innerContainer = New("Orange", types.FlagTypeNone, codelocation.New(0)) + container.PushContainerNode(innerContainer) + innerItA = leafnodes.NewItNode("inner A", func() {}, types.FlagTypeFocused, codelocation.New(0), 0, nil, 0) + innerContainer.PushSubjectNode(innerItA) + }) + + It("should unfocus the parent", func() { + container.BackPropagateProgrammaticFocus() + + Ω(container.Flag()).Should(Equal(types.FlagTypeNone)) + Ω(itA.Flag()).Should(Equal(types.FlagTypeNone)) + Ω(innerContainer.Flag()).Should(Equal(types.FlagTypeNone)) + Ω(innerItA.Flag()).Should(Equal(types.FlagTypeFocused)) + }) + }) + + Context("when a parent is focused *and* an inner container is focused", func() { + BeforeEach(func() { + container = New("description text", types.FlagTypeFocused, codeLocation) + itA = leafnodes.NewItNode("A", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0) + container.PushSubjectNode(itA) + + innerContainer = New("Orange", types.FlagTypeFocused, codelocation.New(0)) + container.PushContainerNode(innerContainer) + innerItA = leafnodes.NewItNode("inner A", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0) + innerContainer.PushSubjectNode(innerItA) + }) + + It("should unfocus the parent", func() { + container.BackPropagateProgrammaticFocus() + + Ω(container.Flag()).Should(Equal(types.FlagTypeNone)) + Ω(itA.Flag()).Should(Equal(types.FlagTypeNone)) + Ω(innerContainer.Flag()).Should(Equal(types.FlagTypeFocused)) + Ω(innerItA.Flag()).Should(Equal(types.FlagTypeNone)) + }) + }) + + Context("when a parent is pending and a child is focused", func() { + BeforeEach(func() { + container = New("description text", types.FlagTypeFocused, codeLocation) + itA = leafnodes.NewItNode("A", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0) + container.PushSubjectNode(itA) + + innerContainer = New("Orange", types.FlagTypePending, codelocation.New(0)) + container.PushContainerNode(innerContainer) + innerItA = leafnodes.NewItNode("inner A", func() {}, types.FlagTypeFocused, codelocation.New(0), 0, nil, 0) + innerContainer.PushSubjectNode(innerItA) + }) + 
+ It("should not do anything", func() { + container.BackPropagateProgrammaticFocus() + + Ω(container.Flag()).Should(Equal(types.FlagTypeFocused)) + Ω(itA.Flag()).Should(Equal(types.FlagTypeNone)) + Ω(innerContainer.Flag()).Should(Equal(types.FlagTypePending)) + Ω(innerItA.Flag()).Should(Equal(types.FlagTypeFocused)) + }) + }) + }) + + Describe("Shuffling", func() { + var unshuffledCollation []CollatedNodes + BeforeEach(func() { + unshuffledCollation = container.Collate() + + r := rand.New(rand.NewSource(17)) + container.Shuffle(r) + }) + + It("should sort, and then shuffle, the top level contents of the container", func() { + shuffledCollation := container.Collate() + Ω(shuffledCollation).Should(HaveLen(len(unshuffledCollation))) + Ω(shuffledCollation).ShouldNot(Equal(unshuffledCollation)) + + for _, entry := range unshuffledCollation { + Ω(shuffledCollation).Should(ContainElement(entry)) + } + + innerAIndex, innerBIndex := 0, 0 + for i, entry := range shuffledCollation { + if entry.Subject == innerItA { + innerAIndex = i + } else if entry.Subject == innerItB { + innerBIndex = i + } + } + + Ω(innerAIndex).Should(Equal(innerBIndex - 1)) + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/failer/failer.go b/vendor/github.com/onsi/ginkgo/internal/failer/failer.go new file mode 100644 index 00000000000..678ea2514a6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/failer/failer.go @@ -0,0 +1,92 @@ +package failer + +import ( + "fmt" + "sync" + + "github.com/onsi/ginkgo/types" +) + +type Failer struct { + lock *sync.Mutex + failure types.SpecFailure + state types.SpecState +} + +func New() *Failer { + return &Failer{ + lock: &sync.Mutex{}, + state: types.SpecStatePassed, + } +} + +func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStatePanicked + f.failure = types.SpecFailure{ + Message: "Test Panicked", + Location: location, + ForwardedPanic: fmt.Sprintf("%v", forwardedPanic), + } + } +} + +func (f *Failer) Timeout(location types.CodeLocation) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStateTimedOut + f.failure = types.SpecFailure{ + Message: "Timed out", + Location: location, + } + } +} + +func (f *Failer) Fail(message string, location types.CodeLocation) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStateFailed + f.failure = types.SpecFailure{ + Message: message, + Location: location, + } + } +} + +func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) { + f.lock.Lock() + defer f.lock.Unlock() + + failure := f.failure + outcome := f.state + if outcome != types.SpecStatePassed { + failure.ComponentType = componentType + failure.ComponentIndex = componentIndex + failure.ComponentCodeLocation = componentCodeLocation + } + + f.state = types.SpecStatePassed + f.failure = types.SpecFailure{} + + return failure, outcome +} + +func (f *Failer) Skip(message string, location types.CodeLocation) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStateSkipped + f.failure = types.SpecFailure{ + Message: message, + Location: location, + } + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/failer/failer_suite_test.go 
b/vendor/github.com/onsi/ginkgo/internal/failer/failer_suite_test.go new file mode 100644 index 00000000000..8dce7be9ac5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/failer/failer_suite_test.go @@ -0,0 +1,13 @@ +package failer_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestFailer(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Failer Suite") +} diff --git a/vendor/github.com/onsi/ginkgo/internal/failer/failer_test.go b/vendor/github.com/onsi/ginkgo/internal/failer/failer_test.go new file mode 100644 index 00000000000..65210a40a7c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/failer/failer_test.go @@ -0,0 +1,141 @@ +package failer_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/internal/failer" + . "github.com/onsi/gomega" + + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/types" +) + +var _ = Describe("Failer", func() { + var ( + failer *Failer + codeLocationA types.CodeLocation + codeLocationB types.CodeLocation + ) + + BeforeEach(func() { + codeLocationA = codelocation.New(0) + codeLocationB = codelocation.New(0) + failer = New() + }) + + Context("with no failures", func() { + It("should return success when drained", func() { + failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB) + Ω(failure).Should(BeZero()) + Ω(state).Should(Equal(types.SpecStatePassed)) + }) + }) + + Describe("Skip", func() { + It("should handle failures", func() { + failer.Skip("something skipped", codeLocationA) + failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB) + Ω(failure).Should(Equal(types.SpecFailure{ + Message: "something skipped", + Location: codeLocationA, + ForwardedPanic: "", + ComponentType: types.SpecComponentTypeIt, + ComponentIndex: 3, + ComponentCodeLocation: codeLocationB, + })) + Ω(state).Should(Equal(types.SpecStateSkipped)) + }) + }) + + Describe("Fail", func() { + It("should handle failures", func() { + failer.Fail("something failed", codeLocationA) + failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB) + Ω(failure).Should(Equal(types.SpecFailure{ + Message: "something failed", + Location: codeLocationA, + ForwardedPanic: "", + ComponentType: types.SpecComponentTypeIt, + ComponentIndex: 3, + ComponentCodeLocation: codeLocationB, + })) + Ω(state).Should(Equal(types.SpecStateFailed)) + }) + }) + + Describe("Panic", func() { + It("should handle panics", func() { + failer.Panic(codeLocationA, "some forwarded panic") + failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB) + Ω(failure).Should(Equal(types.SpecFailure{ + Message: "Test Panicked", + Location: codeLocationA, + ForwardedPanic: "some forwarded panic", + ComponentType: types.SpecComponentTypeIt, + ComponentIndex: 3, + ComponentCodeLocation: codeLocationB, + })) + Ω(state).Should(Equal(types.SpecStatePanicked)) + }) + }) + + Describe("Timeout", func() { + It("should handle timeouts", func() { + failer.Timeout(codeLocationA) + failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB) + Ω(failure).Should(Equal(types.SpecFailure{ + Message: "Timed out", + Location: codeLocationA, + ForwardedPanic: "", + ComponentType: types.SpecComponentTypeIt, + ComponentIndex: 3, + ComponentCodeLocation: codeLocationB, + })) + Ω(state).Should(Equal(types.SpecStateTimedOut)) + }) + }) + + Context("when multiple failures are registered", func() { + BeforeEach(func() { + failer.Fail("something failed", 
codeLocationA) + failer.Fail("something else failed", codeLocationA) + }) + + It("should only report the first one when drained", func() { + failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB) + + Ω(failure).Should(Equal(types.SpecFailure{ + Message: "something failed", + Location: codeLocationA, + ForwardedPanic: "", + ComponentType: types.SpecComponentTypeIt, + ComponentIndex: 3, + ComponentCodeLocation: codeLocationB, + })) + Ω(state).Should(Equal(types.SpecStateFailed)) + }) + + It("should report subsequent failures after being drained", func() { + failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB) + failer.Fail("yet another thing failed", codeLocationA) + + failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB) + + Ω(failure).Should(Equal(types.SpecFailure{ + Message: "yet another thing failed", + Location: codeLocationA, + ForwardedPanic: "", + ComponentType: types.SpecComponentTypeIt, + ComponentIndex: 3, + ComponentCodeLocation: codeLocationB, + })) + Ω(state).Should(Equal(types.SpecStateFailed)) + }) + + It("should report success on subsequent drains if no errors occur", func() { + failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB) + failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB) + Ω(failure).Should(BeZero()) + Ω(state).Should(Equal(types.SpecStatePassed)) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go new file mode 100644 index 00000000000..9c3eed2b6fe --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go @@ -0,0 +1,103 @@ +package leafnodes + +import ( + "math" + "time" + + "sync" + + "github.com/onsi/ginkgo/types" +) + +type benchmarker struct { + mu sync.Mutex + measurements map[string]*types.SpecMeasurement + orderCounter int +} + +func newBenchmarker() *benchmarker { + return &benchmarker{ + measurements: make(map[string]*types.SpecMeasurement, 0), + } +} + +func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) { + t := time.Now() + body() + elapsedTime = time.Since(t) + + b.mu.Lock() + defer b.mu.Unlock() + measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", 3, info...) + measurement.Results = append(measurement.Results, elapsedTime.Seconds()) + + return +} + +func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) { + measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...) + b.mu.Lock() + defer b.mu.Unlock() + measurement.Results = append(measurement.Results, value) +} + +func (b *benchmarker) RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) { + measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...) 
+ b.mu.Lock() + defer b.mu.Unlock() + measurement.Results = append(measurement.Results, value) +} + +func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, precision int, info ...interface{}) *types.SpecMeasurement { + measurement, ok := b.measurements[name] + if !ok { + var computedInfo interface{} + computedInfo = nil + if len(info) > 0 { + computedInfo = info[0] + } + measurement = &types.SpecMeasurement{ + Name: name, + Info: computedInfo, + Order: b.orderCounter, + SmallestLabel: smallestLabel, + LargestLabel: largestLabel, + AverageLabel: averageLabel, + Units: units, + Precision: precision, + Results: make([]float64, 0), + } + b.measurements[name] = measurement + b.orderCounter++ + } + + return measurement +} + +func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement { + b.mu.Lock() + defer b.mu.Unlock() + for _, measurement := range b.measurements { + measurement.Smallest = math.MaxFloat64 + measurement.Largest = -math.MaxFloat64 + sum := float64(0) + sumOfSquares := float64(0) + + for _, result := range measurement.Results { + if result > measurement.Largest { + measurement.Largest = result + } + if result < measurement.Smallest { + measurement.Smallest = result + } + sum += result + sumOfSquares += result * result + } + + n := float64(len(measurement.Results)) + measurement.Average = sum / n + measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n)) + } + + return b.measurements +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go new file mode 100644 index 00000000000..8c3902d601c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go @@ -0,0 +1,19 @@ +package leafnodes + +import ( + "github.com/onsi/ginkgo/types" +) + +type BasicNode interface { + Type() types.SpecComponentType + Run() (types.SpecState, types.SpecFailure) + CodeLocation() types.CodeLocation +} + +type SubjectNode interface { + BasicNode + + Text() string + Flag() types.FlagType + Samples() int +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go new file mode 100644 index 00000000000..c76fe3a4512 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go @@ -0,0 +1,46 @@ +package leafnodes + +import ( + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" + "time" +) + +type ItNode struct { + runner *runner + + flag types.FlagType + text string +} + +func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode { + return &ItNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex), + flag: flag, + text: text, + } +} + +func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) { + return node.runner.run() +} + +func (node *ItNode) Type() types.SpecComponentType { + return types.SpecComponentTypeIt +} + +func (node *ItNode) Text() string { + return node.text +} + +func (node *ItNode) Flag() types.FlagType { + return node.flag +} + +func (node *ItNode) CodeLocation() types.CodeLocation { + return node.runner.codeLocation +} + +func (node *ItNode) Samples() int { + return 1 +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node_test.go 
b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node_test.go new file mode 100644 index 00000000000..29fa0c6e2a6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node_test.go @@ -0,0 +1,22 @@ +package leafnodes_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/internal/leafnodes" + . "github.com/onsi/gomega" + + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/types" +) + +var _ = Describe("It Nodes", func() { + It("should report the correct type, text, flag, and code location", func() { + codeLocation := codelocation.New(0) + it := NewItNode("my it node", func() {}, types.FlagTypeFocused, codeLocation, 0, nil, 3) + Ω(it.Type()).Should(Equal(types.SpecComponentTypeIt)) + Ω(it.Flag()).Should(Equal(types.FlagTypeFocused)) + Ω(it.Text()).Should(Equal("my it node")) + Ω(it.CodeLocation()).Should(Equal(codeLocation)) + Ω(it.Samples()).Should(Equal(1)) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/leaf_node_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/leaf_node_suite_test.go new file mode 100644 index 00000000000..a7ba9e006ee --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/leaf_node_suite_test.go @@ -0,0 +1,13 @@ +package leafnodes_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestLeafNode(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "LeafNode Suite") +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go new file mode 100644 index 00000000000..efc3348c1b6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go @@ -0,0 +1,61 @@ +package leafnodes + +import ( + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" + "reflect" +) + +type MeasureNode struct { + runner *runner + + text string + flag types.FlagType + samples int + benchmarker *benchmarker +} + +func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode { + benchmarker := newBenchmarker() + + wrappedBody := func() { + reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)}) + } + + return &MeasureNode{ + runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex), + + text: text, + flag: flag, + samples: samples, + benchmarker: benchmarker, + } +} + +func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) { + return node.runner.run() +} + +func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement { + return node.benchmarker.measurementsReport() +} + +func (node *MeasureNode) Type() types.SpecComponentType { + return types.SpecComponentTypeMeasure +} + +func (node *MeasureNode) Text() string { + return node.text +} + +func (node *MeasureNode) Flag() types.FlagType { + return node.flag +} + +func (node *MeasureNode) CodeLocation() types.CodeLocation { + return node.runner.codeLocation +} + +func (node *MeasureNode) Samples() int { + return node.samples +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node_test.go new file mode 100644 index 00000000000..252065b8715 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node_test.go @@ -0,0 +1,154 @@ 
+package leafnodes_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/internal/leafnodes" + . "github.com/onsi/gomega" + + "github.com/onsi/ginkgo/internal/codelocation" + Failer "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" + "time" +) + +var _ = Describe("Measure Nodes", func() { + It("should report the correct type, text, flag, and code location", func() { + codeLocation := codelocation.New(0) + measure := NewMeasureNode("my measure node", func(b Benchmarker) {}, types.FlagTypeFocused, codeLocation, 10, nil, 3) + Ω(measure.Type()).Should(Equal(types.SpecComponentTypeMeasure)) + Ω(measure.Flag()).Should(Equal(types.FlagTypeFocused)) + Ω(measure.Text()).Should(Equal("my measure node")) + Ω(measure.CodeLocation()).Should(Equal(codeLocation)) + Ω(measure.Samples()).Should(Equal(10)) + }) + + Describe("benchmarking", func() { + var measure *MeasureNode + + Describe("Value", func() { + BeforeEach(func() { + measure = NewMeasureNode("the measurement", func(b Benchmarker) { + b.RecordValue("foo", 7, "info!") + b.RecordValue("foo", 2) + b.RecordValue("foo", 3) + b.RecordValue("bar", 0.3) + b.RecordValue("bar", 0.1) + b.RecordValue("bar", 0.5) + b.RecordValue("bar", 0.7) + }, types.FlagTypeFocused, codelocation.New(0), 1, Failer.New(), 3) + Ω(measure.Run()).Should(Equal(types.SpecStatePassed)) + }) + + It("records passed in values and reports on them", func() { + report := measure.MeasurementsReport() + Ω(report).Should(HaveLen(2)) + Ω(report["foo"].Name).Should(Equal("foo")) + Ω(report["foo"].Info).Should(Equal("info!")) + Ω(report["foo"].Order).Should(Equal(0)) + Ω(report["foo"].SmallestLabel).Should(Equal("Smallest")) + Ω(report["foo"].LargestLabel).Should(Equal(" Largest")) + Ω(report["foo"].AverageLabel).Should(Equal(" Average")) + Ω(report["foo"].Units).Should(Equal("")) + Ω(report["foo"].Results).Should(Equal([]float64{7, 2, 3})) + Ω(report["foo"].Smallest).Should(BeNumerically("==", 2)) + Ω(report["foo"].Largest).Should(BeNumerically("==", 7)) + Ω(report["foo"].Average).Should(BeNumerically("==", 4)) + Ω(report["foo"].StdDeviation).Should(BeNumerically("~", 2.16, 0.01)) + + Ω(report["bar"].Name).Should(Equal("bar")) + Ω(report["bar"].Info).Should(BeNil()) + Ω(report["bar"].SmallestLabel).Should(Equal("Smallest")) + Ω(report["bar"].Order).Should(Equal(1)) + Ω(report["bar"].LargestLabel).Should(Equal(" Largest")) + Ω(report["bar"].AverageLabel).Should(Equal(" Average")) + Ω(report["bar"].Units).Should(Equal("")) + Ω(report["bar"].Results).Should(Equal([]float64{0.3, 0.1, 0.5, 0.7})) + Ω(report["bar"].Smallest).Should(BeNumerically("==", 0.1)) + Ω(report["bar"].Largest).Should(BeNumerically("==", 0.7)) + Ω(report["bar"].Average).Should(BeNumerically("==", 0.4)) + Ω(report["bar"].StdDeviation).Should(BeNumerically("~", 0.22, 0.01)) + }) + }) + + Describe("Value with precision", func() { + BeforeEach(func() { + measure = NewMeasureNode("the measurement", func(b Benchmarker) { + b.RecordValueWithPrecision("foo", 7, "ms", 7, "info!") + b.RecordValueWithPrecision("foo", 2, "ms", 6) + b.RecordValueWithPrecision("foo", 3, "ms", 5) + b.RecordValueWithPrecision("bar", 0.3, "ns", 4) + b.RecordValueWithPrecision("bar", 0.1, "ns", 3) + b.RecordValueWithPrecision("bar", 0.5, "ns", 2) + b.RecordValueWithPrecision("bar", 0.7, "ns", 1) + }, types.FlagTypeFocused, codelocation.New(0), 1, Failer.New(), 3) + Ω(measure.Run()).Should(Equal(types.SpecStatePassed)) + }) + + It("records passed in values and reports on them", func() { + report := 
measure.MeasurementsReport() + Ω(report).Should(HaveLen(2)) + Ω(report["foo"].Name).Should(Equal("foo")) + Ω(report["foo"].Info).Should(Equal("info!")) + Ω(report["foo"].Order).Should(Equal(0)) + Ω(report["foo"].SmallestLabel).Should(Equal("Smallest")) + Ω(report["foo"].LargestLabel).Should(Equal(" Largest")) + Ω(report["foo"].AverageLabel).Should(Equal(" Average")) + Ω(report["foo"].Units).Should(Equal("ms")) + Ω(report["foo"].Results).Should(Equal([]float64{7, 2, 3})) + Ω(report["foo"].Smallest).Should(BeNumerically("==", 2)) + Ω(report["foo"].Largest).Should(BeNumerically("==", 7)) + Ω(report["foo"].Average).Should(BeNumerically("==", 4)) + Ω(report["foo"].StdDeviation).Should(BeNumerically("~", 2.16, 0.01)) + + Ω(report["bar"].Name).Should(Equal("bar")) + Ω(report["bar"].Info).Should(BeNil()) + Ω(report["bar"].SmallestLabel).Should(Equal("Smallest")) + Ω(report["bar"].Order).Should(Equal(1)) + Ω(report["bar"].LargestLabel).Should(Equal(" Largest")) + Ω(report["bar"].AverageLabel).Should(Equal(" Average")) + Ω(report["bar"].Units).Should(Equal("ns")) + Ω(report["bar"].Results).Should(Equal([]float64{0.3, 0.1, 0.5, 0.7})) + Ω(report["bar"].Smallest).Should(BeNumerically("==", 0.1)) + Ω(report["bar"].Largest).Should(BeNumerically("==", 0.7)) + Ω(report["bar"].Average).Should(BeNumerically("==", 0.4)) + Ω(report["bar"].StdDeviation).Should(BeNumerically("~", 0.22, 0.01)) + }) + }) + + Describe("Time", func() { + BeforeEach(func() { + measure = NewMeasureNode("the measurement", func(b Benchmarker) { + b.Time("foo", func() { + time.Sleep(100 * time.Millisecond) + }, "info!") + b.Time("foo", func() { + time.Sleep(200 * time.Millisecond) + }) + b.Time("foo", func() { + time.Sleep(170 * time.Millisecond) + }) + }, types.FlagTypeFocused, codelocation.New(0), 1, Failer.New(), 3) + Ω(measure.Run()).Should(Equal(types.SpecStatePassed)) + }) + + It("records passed in values and reports on them", func() { + report := measure.MeasurementsReport() + Ω(report).Should(HaveLen(1)) + Ω(report["foo"].Name).Should(Equal("foo")) + Ω(report["foo"].Info).Should(Equal("info!")) + Ω(report["foo"].SmallestLabel).Should(Equal("Fastest Time")) + Ω(report["foo"].LargestLabel).Should(Equal("Slowest Time")) + Ω(report["foo"].AverageLabel).Should(Equal("Average Time")) + Ω(report["foo"].Units).Should(Equal("s")) + Ω(report["foo"].Results).Should(HaveLen(3)) + Ω(report["foo"].Results[0]).Should(BeNumerically("~", 0.1, 0.01)) + Ω(report["foo"].Results[1]).Should(BeNumerically("~", 0.2, 0.01)) + Ω(report["foo"].Results[2]).Should(BeNumerically("~", 0.17, 0.01)) + Ω(report["foo"].Smallest).Should(BeNumerically("~", 0.1, 0.01)) + Ω(report["foo"].Largest).Should(BeNumerically("~", 0.2, 0.01)) + Ω(report["foo"].Average).Should(BeNumerically("~", 0.16, 0.01)) + Ω(report["foo"].StdDeviation).Should(BeNumerically("~", 0.04, 0.01)) + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go new file mode 100644 index 00000000000..870ad826da0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go @@ -0,0 +1,113 @@ +package leafnodes + +import ( + "fmt" + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" + "reflect" + "time" +) + +type runner struct { + isAsync bool + asyncFunc func(chan<- interface{}) + syncFunc func() + codeLocation types.CodeLocation + timeoutThreshold time.Duration + nodeType types.SpecComponentType + componentIndex int + 
failer *failer.Failer +} + +func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner { + bodyType := reflect.TypeOf(body) + if bodyType.Kind() != reflect.Func { + panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation)) + } + + runner := &runner{ + codeLocation: codeLocation, + timeoutThreshold: timeout, + failer: failer, + nodeType: nodeType, + componentIndex: componentIndex, + } + + switch bodyType.NumIn() { + case 0: + runner.syncFunc = body.(func()) + return runner + case 1: + if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) { + panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation)) + } + + wrappedBody := func(done chan<- interface{}) { + bodyValue := reflect.ValueOf(body) + bodyValue.Call([]reflect.Value{reflect.ValueOf(done)}) + } + + runner.isAsync = true + runner.asyncFunc = wrappedBody + return runner + } + + panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation)) +} + +func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) { + if r.isAsync { + return r.runAsync() + } else { + return r.runSync() + } +} + +func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) { + done := make(chan interface{}, 1) + + go func() { + finished := false + + defer func() { + if e := recover(); e != nil || !finished { + r.failer.Panic(codelocation.New(2), e) + select { + case <-done: + break + default: + close(done) + } + } + }() + + r.asyncFunc(done) + finished = true + }() + + select { + case <-done: + case <-time.After(r.timeoutThreshold): + r.failer.Timeout(r.codeLocation) + } + + failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation) + return +} +func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) { + finished := false + + defer func() { + if e := recover(); e != nil || !finished { + r.failer.Panic(codelocation.New(2), e) + } + + failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation) + }() + + r.syncFunc() + finished = true + + return +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go new file mode 100644 index 00000000000..6b725a63153 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go @@ -0,0 +1,41 @@ +package leafnodes + +import ( + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" + "time" +) + +type SetupNode struct { + runner *runner +} + +func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) { + return node.runner.run() +} + +func (node *SetupNode) Type() types.SpecComponentType { + return node.runner.nodeType +} + +func (node *SetupNode) CodeLocation() types.CodeLocation { + return node.runner.codeLocation +} + +func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { + return &SetupNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex), + } +} + +func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { + return &SetupNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, 
componentIndex), + } +} + +func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { + return &SetupNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex), + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes_test.go new file mode 100644 index 00000000000..d5b9251f652 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes_test.go @@ -0,0 +1,40 @@ +package leafnodes_test + +import ( + . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/types" + . "github.com/onsi/gomega" + + . "github.com/onsi/ginkgo/internal/leafnodes" + + "github.com/onsi/ginkgo/internal/codelocation" +) + +var _ = Describe("Setup Nodes", func() { + Describe("BeforeEachNodes", func() { + It("should report the correct type and code location", func() { + codeLocation := codelocation.New(0) + beforeEach := NewBeforeEachNode(func() {}, codeLocation, 0, nil, 3) + Ω(beforeEach.Type()).Should(Equal(types.SpecComponentTypeBeforeEach)) + Ω(beforeEach.CodeLocation()).Should(Equal(codeLocation)) + }) + }) + + Describe("AfterEachNodes", func() { + It("should report the correct type and code location", func() { + codeLocation := codelocation.New(0) + afterEach := NewAfterEachNode(func() {}, codeLocation, 0, nil, 3) + Ω(afterEach.Type()).Should(Equal(types.SpecComponentTypeAfterEach)) + Ω(afterEach.CodeLocation()).Should(Equal(codeLocation)) + }) + }) + + Describe("JustBeforeEachNodes", func() { + It("should report the correct type and code location", func() { + codeLocation := codelocation.New(0) + justBeforeEach := NewJustBeforeEachNode(func() {}, codeLocation, 0, nil, 3) + Ω(justBeforeEach.Type()).Should(Equal(types.SpecComponentTypeJustBeforeEach)) + Ω(justBeforeEach.CodeLocation()).Should(Equal(codeLocation)) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go new file mode 100644 index 00000000000..4bf906c706d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go @@ -0,0 +1,359 @@ +package leafnodes_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/internal/leafnodes" + . 
"github.com/onsi/gomega" + + "reflect" + "runtime" + "time" + + "github.com/onsi/ginkgo/internal/codelocation" + Failer "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type runnable interface { + Run() (outcome types.SpecState, failure types.SpecFailure) + CodeLocation() types.CodeLocation +} + +func SynchronousSharedRunnerBehaviors(build func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable, componentType types.SpecComponentType, componentIndex int) { + var ( + outcome types.SpecState + failure types.SpecFailure + + failer *Failer.Failer + + componentCodeLocation types.CodeLocation + innerCodeLocation types.CodeLocation + + didRun bool + ) + + BeforeEach(func() { + failer = Failer.New() + componentCodeLocation = codelocation.New(0) + innerCodeLocation = codelocation.New(0) + + didRun = false + }) + + Describe("synchronous functions", func() { + Context("when the function passes", func() { + BeforeEach(func() { + outcome, failure = build(func() { + didRun = true + }, 0, failer, componentCodeLocation).Run() + }) + + It("should have a succesful outcome", func() { + Ω(didRun).Should(BeTrue()) + + Ω(outcome).Should(Equal(types.SpecStatePassed)) + Ω(failure).Should(BeZero()) + }) + }) + + Context("when a failure occurs", func() { + BeforeEach(func() { + outcome, failure = build(func() { + didRun = true + failer.Fail("bam", innerCodeLocation) + panic("should not matter") + }, 0, failer, componentCodeLocation).Run() + }) + + It("should return the failure", func() { + Ω(didRun).Should(BeTrue()) + + Ω(outcome).Should(Equal(types.SpecStateFailed)) + Ω(failure).Should(Equal(types.SpecFailure{ + Message: "bam", + Location: innerCodeLocation, + ForwardedPanic: "", + ComponentIndex: componentIndex, + ComponentType: componentType, + ComponentCodeLocation: componentCodeLocation, + })) + }) + }) + + Context("when a panic occurs", func() { + BeforeEach(func() { + outcome, failure = build(func() { + didRun = true + innerCodeLocation = codelocation.New(0) + panic("ack!") + }, 0, failer, componentCodeLocation).Run() + }) + + It("should return the panic", func() { + Ω(didRun).Should(BeTrue()) + + Ω(outcome).Should(Equal(types.SpecStatePanicked)) + Ω(failure.ForwardedPanic).Should(Equal("ack!")) + }) + }) + + Context("when a panic occurs with a nil value", func() { + BeforeEach(func() { + outcome, failure = build(func() { + didRun = true + innerCodeLocation = codelocation.New(0) + panic(nil) + }, 0, failer, componentCodeLocation).Run() + }) + + It("should return the nil-valued panic", func() { + Ω(didRun).Should(BeTrue()) + + Ω(outcome).Should(Equal(types.SpecStatePanicked)) + Ω(failure.ForwardedPanic).Should(Equal("")) + }) + }) + + }) +} + +func AsynchronousSharedRunnerBehaviors(build func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable, componentType types.SpecComponentType, componentIndex int) { + var ( + outcome types.SpecState + failure types.SpecFailure + + failer *Failer.Failer + + componentCodeLocation types.CodeLocation + innerCodeLocation types.CodeLocation + + didRun bool + ) + + BeforeEach(func() { + failer = Failer.New() + componentCodeLocation = codelocation.New(0) + innerCodeLocation = codelocation.New(0) + + didRun = false + }) + + Describe("asynchronous functions", func() { + var timeoutDuration time.Duration + + BeforeEach(func() { + timeoutDuration = time.Duration(1 * float64(time.Second)) + }) + + Context("when running", func() { 
+ It("should run the function as a goroutine, and block until it's done", func() { + initialNumberOfGoRoutines := runtime.NumGoroutine() + numberOfGoRoutines := 0 + + build(func(done Done) { + didRun = true + numberOfGoRoutines = runtime.NumGoroutine() + close(done) + }, timeoutDuration, failer, componentCodeLocation).Run() + + Ω(didRun).Should(BeTrue()) + Ω(numberOfGoRoutines).Should(BeNumerically(">=", initialNumberOfGoRoutines+1)) + }) + }) + + Context("when the function passes", func() { + BeforeEach(func() { + outcome, failure = build(func(done Done) { + didRun = true + close(done) + }, timeoutDuration, failer, componentCodeLocation).Run() + }) + + It("should have a succesful outcome", func() { + Ω(didRun).Should(BeTrue()) + Ω(outcome).Should(Equal(types.SpecStatePassed)) + Ω(failure).Should(BeZero()) + }) + }) + + Context("when the function fails", func() { + BeforeEach(func() { + outcome, failure = build(func(done Done) { + didRun = true + failer.Fail("bam", innerCodeLocation) + time.Sleep(20 * time.Millisecond) + panic("doesn't matter") + close(done) + }, 10*time.Millisecond, failer, componentCodeLocation).Run() + }) + + It("should return the failure", func() { + Ω(didRun).Should(BeTrue()) + + Ω(outcome).Should(Equal(types.SpecStateFailed)) + Ω(failure).Should(Equal(types.SpecFailure{ + Message: "bam", + Location: innerCodeLocation, + ForwardedPanic: "", + ComponentIndex: componentIndex, + ComponentType: componentType, + ComponentCodeLocation: componentCodeLocation, + })) + }) + }) + + Context("when the function times out", func() { + var guard chan struct{} + + BeforeEach(func() { + guard = make(chan struct{}) + outcome, failure = build(func(done Done) { + didRun = true + time.Sleep(20 * time.Millisecond) + close(guard) + panic("doesn't matter") + close(done) + }, 10*time.Millisecond, failer, componentCodeLocation).Run() + }) + + It("should return the timeout", func() { + <-guard + Ω(didRun).Should(BeTrue()) + + Ω(outcome).Should(Equal(types.SpecStateTimedOut)) + Ω(failure).Should(Equal(types.SpecFailure{ + Message: "Timed out", + Location: componentCodeLocation, + ForwardedPanic: "", + ComponentIndex: componentIndex, + ComponentType: componentType, + ComponentCodeLocation: componentCodeLocation, + })) + }) + }) + + Context("when the function panics", func() { + BeforeEach(func() { + outcome, failure = build(func(done Done) { + didRun = true + innerCodeLocation = codelocation.New(0) + panic("ack!") + }, 100*time.Millisecond, failer, componentCodeLocation).Run() + }) + + It("should return the panic", func() { + Ω(didRun).Should(BeTrue()) + + Ω(outcome).Should(Equal(types.SpecStatePanicked)) + Ω(failure.ForwardedPanic).Should(Equal("ack!")) + }) + }) + + Context("when the function panics with a nil value", func() { + BeforeEach(func() { + outcome, failure = build(func(done Done) { + didRun = true + innerCodeLocation = codelocation.New(0) + panic(nil) + }, 100*time.Millisecond, failer, componentCodeLocation).Run() + }) + + It("should return the nil-valued panic", func() { + Ω(didRun).Should(BeTrue()) + + Ω(outcome).Should(Equal(types.SpecStatePanicked)) + Ω(failure.ForwardedPanic).Should(Equal("")) + }) + }) + }) +} + +func InvalidSharedRunnerBehaviors(build func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable, componentType types.SpecComponentType) { + var ( + failer *Failer.Failer + componentCodeLocation types.CodeLocation + ) + + BeforeEach(func() { + failer = Failer.New() + componentCodeLocation = 
codelocation.New(0) + }) + + Describe("invalid functions", func() { + Context("when passed something that's not a function", func() { + It("should panic", func() { + Ω(func() { + build("not a function", 0, failer, componentCodeLocation) + }).Should(Panic()) + }) + }) + + Context("when the function takes the wrong kind of argument", func() { + It("should panic", func() { + Ω(func() { + build(func(oops string) {}, 0, failer, componentCodeLocation) + }).Should(Panic()) + }) + }) + + Context("when the function takes more than one argument", func() { + It("should panic", func() { + Ω(func() { + build(func(done Done, oops string) {}, 0, failer, componentCodeLocation) + }).Should(Panic()) + }) + }) + }) +} + +var _ = Describe("Shared RunnableNode behavior", func() { + Describe("It Nodes", func() { + build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable { + return NewItNode("", body, types.FlagTypeFocused, componentCodeLocation, timeout, failer, 3) + } + + SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeIt, 3) + AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeIt, 3) + InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeIt) + }) + + Describe("Measure Nodes", func() { + build := func(body interface{}, _ time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable { + return NewMeasureNode("", func(Benchmarker) { + reflect.ValueOf(body).Call([]reflect.Value{}) + }, types.FlagTypeFocused, componentCodeLocation, 10, failer, 3) + } + + SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeMeasure, 3) + }) + + Describe("BeforeEach Nodes", func() { + build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable { + return NewBeforeEachNode(body, componentCodeLocation, timeout, failer, 3) + } + + SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeBeforeEach, 3) + AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeBeforeEach, 3) + InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeBeforeEach) + }) + + Describe("AfterEach Nodes", func() { + build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable { + return NewAfterEachNode(body, componentCodeLocation, timeout, failer, 3) + } + + SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeAfterEach, 3) + AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeAfterEach, 3) + InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeAfterEach) + }) + + Describe("JustBeforeEach Nodes", func() { + build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable { + return NewJustBeforeEachNode(body, componentCodeLocation, timeout, failer, 3) + } + + SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeJustBeforeEach, 3) + AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeJustBeforeEach, 3) + InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeJustBeforeEach) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go new file mode 100644 index 00000000000..2ccc7dc0fb0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go @@ -0,0 +1,54 @@ +package leafnodes + +import ( + "github.com/onsi/ginkgo/internal/failer" + 
"github.com/onsi/ginkgo/types" + "time" +) + +type SuiteNode interface { + Run(parallelNode int, parallelTotal int, syncHost string) bool + Passed() bool + Summary() *types.SetupSummary +} + +type simpleSuiteNode struct { + runner *runner + outcome types.SpecState + failure types.SpecFailure + runTime time.Duration +} + +func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { + t := time.Now() + node.outcome, node.failure = node.runner.run() + node.runTime = time.Since(t) + + return node.outcome == types.SpecStatePassed +} + +func (node *simpleSuiteNode) Passed() bool { + return node.outcome == types.SpecStatePassed +} + +func (node *simpleSuiteNode) Summary() *types.SetupSummary { + return &types.SetupSummary{ + ComponentType: node.runner.nodeType, + CodeLocation: node.runner.codeLocation, + State: node.outcome, + RunTime: node.runTime, + Failure: node.failure, + } +} + +func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + return &simpleSuiteNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0), + } +} + +func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + return &simpleSuiteNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes_test.go new file mode 100644 index 00000000000..246b329fe2b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes_test.go @@ -0,0 +1,230 @@ +package leafnodes_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . 
"github.com/onsi/ginkgo/internal/leafnodes" + + "time" + + "github.com/onsi/ginkgo/internal/codelocation" + Failer "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +var _ = Describe("SuiteNodes", func() { + Describe("BeforeSuite nodes", func() { + var befSuite SuiteNode + var failer *Failer.Failer + var codeLocation types.CodeLocation + var innerCodeLocation types.CodeLocation + var outcome bool + + BeforeEach(func() { + failer = Failer.New() + codeLocation = codelocation.New(0) + innerCodeLocation = codelocation.New(0) + }) + + Context("when the body passes", func() { + BeforeEach(func() { + befSuite = NewBeforeSuiteNode(func() { + time.Sleep(10 * time.Millisecond) + }, codeLocation, 0, failer) + outcome = befSuite.Run(0, 0, "") + }) + + It("should return true when run and report as passed", func() { + Ω(outcome).Should(BeTrue()) + Ω(befSuite.Passed()).Should(BeTrue()) + }) + + It("should have the correct summary", func() { + summary := befSuite.Summary() + Ω(summary.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite)) + Ω(summary.CodeLocation).Should(Equal(codeLocation)) + Ω(summary.State).Should(Equal(types.SpecStatePassed)) + Ω(summary.RunTime).Should(BeNumerically(">=", 10*time.Millisecond)) + Ω(summary.Failure).Should(BeZero()) + }) + }) + + Context("when the body fails", func() { + BeforeEach(func() { + befSuite = NewBeforeSuiteNode(func() { + failer.Fail("oops", innerCodeLocation) + }, codeLocation, 0, failer) + outcome = befSuite.Run(0, 0, "") + }) + + It("should return false when run and report as failed", func() { + Ω(outcome).Should(BeFalse()) + Ω(befSuite.Passed()).Should(BeFalse()) + }) + + It("should have the correct summary", func() { + summary := befSuite.Summary() + Ω(summary.State).Should(Equal(types.SpecStateFailed)) + Ω(summary.Failure.Message).Should(Equal("oops")) + Ω(summary.Failure.Location).Should(Equal(innerCodeLocation)) + Ω(summary.Failure.ForwardedPanic).Should(BeEmpty()) + Ω(summary.Failure.ComponentIndex).Should(Equal(0)) + Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite)) + Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation)) + }) + }) + + Context("when the body times out", func() { + BeforeEach(func() { + befSuite = NewBeforeSuiteNode(func(done Done) { + }, codeLocation, time.Millisecond, failer) + outcome = befSuite.Run(0, 0, "") + }) + + It("should return false when run and report as failed", func() { + Ω(outcome).Should(BeFalse()) + Ω(befSuite.Passed()).Should(BeFalse()) + }) + + It("should have the correct summary", func() { + summary := befSuite.Summary() + Ω(summary.State).Should(Equal(types.SpecStateTimedOut)) + Ω(summary.Failure.ForwardedPanic).Should(BeEmpty()) + Ω(summary.Failure.ComponentIndex).Should(Equal(0)) + Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite)) + Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation)) + }) + }) + + Context("when the body panics", func() { + BeforeEach(func() { + befSuite = NewBeforeSuiteNode(func() { + panic("bam") + }, codeLocation, 0, failer) + outcome = befSuite.Run(0, 0, "") + }) + + It("should return false when run and report as failed", func() { + Ω(outcome).Should(BeFalse()) + Ω(befSuite.Passed()).Should(BeFalse()) + }) + + It("should have the correct summary", func() { + summary := befSuite.Summary() + Ω(summary.State).Should(Equal(types.SpecStatePanicked)) + Ω(summary.Failure.ForwardedPanic).Should(Equal("bam")) + Ω(summary.Failure.ComponentIndex).Should(Equal(0)) + 
Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite)) + Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation)) + }) + }) + }) + + Describe("AfterSuite nodes", func() { + var aftSuite SuiteNode + var failer *Failer.Failer + var codeLocation types.CodeLocation + var innerCodeLocation types.CodeLocation + var outcome bool + + BeforeEach(func() { + failer = Failer.New() + codeLocation = codelocation.New(0) + innerCodeLocation = codelocation.New(0) + }) + + Context("when the body passes", func() { + BeforeEach(func() { + aftSuite = NewAfterSuiteNode(func() { + time.Sleep(10 * time.Millisecond) + }, codeLocation, 0, failer) + outcome = aftSuite.Run(0, 0, "") + }) + + It("should return true when run and report as passed", func() { + Ω(outcome).Should(BeTrue()) + Ω(aftSuite.Passed()).Should(BeTrue()) + }) + + It("should have the correct summary", func() { + summary := aftSuite.Summary() + Ω(summary.ComponentType).Should(Equal(types.SpecComponentTypeAfterSuite)) + Ω(summary.CodeLocation).Should(Equal(codeLocation)) + Ω(summary.State).Should(Equal(types.SpecStatePassed)) + Ω(summary.RunTime).Should(BeNumerically(">=", 10*time.Millisecond)) + Ω(summary.Failure).Should(BeZero()) + }) + }) + + Context("when the body fails", func() { + BeforeEach(func() { + aftSuite = NewAfterSuiteNode(func() { + failer.Fail("oops", innerCodeLocation) + }, codeLocation, 0, failer) + outcome = aftSuite.Run(0, 0, "") + }) + + It("should return false when run and report as failed", func() { + Ω(outcome).Should(BeFalse()) + Ω(aftSuite.Passed()).Should(BeFalse()) + }) + + It("should have the correct summary", func() { + summary := aftSuite.Summary() + Ω(summary.State).Should(Equal(types.SpecStateFailed)) + Ω(summary.Failure.Message).Should(Equal("oops")) + Ω(summary.Failure.Location).Should(Equal(innerCodeLocation)) + Ω(summary.Failure.ForwardedPanic).Should(BeEmpty()) + Ω(summary.Failure.ComponentIndex).Should(Equal(0)) + Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeAfterSuite)) + Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation)) + }) + }) + + Context("when the body times out", func() { + BeforeEach(func() { + aftSuite = NewAfterSuiteNode(func(done Done) { + }, codeLocation, time.Millisecond, failer) + outcome = aftSuite.Run(0, 0, "") + }) + + It("should return false when run and report as failed", func() { + Ω(outcome).Should(BeFalse()) + Ω(aftSuite.Passed()).Should(BeFalse()) + }) + + It("should have the correct summary", func() { + summary := aftSuite.Summary() + Ω(summary.State).Should(Equal(types.SpecStateTimedOut)) + Ω(summary.Failure.ForwardedPanic).Should(BeEmpty()) + Ω(summary.Failure.ComponentIndex).Should(Equal(0)) + Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeAfterSuite)) + Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation)) + }) + }) + + Context("when the body panics", func() { + BeforeEach(func() { + aftSuite = NewAfterSuiteNode(func() { + panic("bam") + }, codeLocation, 0, failer) + outcome = aftSuite.Run(0, 0, "") + }) + + It("should return false when run and report as failed", func() { + Ω(outcome).Should(BeFalse()) + Ω(aftSuite.Passed()).Should(BeFalse()) + }) + + It("should have the correct summary", func() { + summary := aftSuite.Summary() + Ω(summary.State).Should(Equal(types.SpecStatePanicked)) + Ω(summary.Failure.ForwardedPanic).Should(Equal("bam")) + Ω(summary.Failure.ComponentIndex).Should(Equal(0)) + 
Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeAfterSuite)) + Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation)) + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go new file mode 100644 index 00000000000..e7030d9149a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go @@ -0,0 +1,89 @@ +package leafnodes + +import ( + "encoding/json" + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" + "io/ioutil" + "net/http" + "time" +) + +type synchronizedAfterSuiteNode struct { + runnerA *runner + runnerB *runner + + outcome types.SpecState + failure types.SpecFailure + runTime time.Duration +} + +func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + return &synchronizedAfterSuiteNode{ + runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), + runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), + } +} + +func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { + node.outcome, node.failure = node.runnerA.run() + + if parallelNode == 1 { + if parallelTotal > 1 { + node.waitUntilOtherNodesAreDone(syncHost) + } + + outcome, failure := node.runnerB.run() + + if node.outcome == types.SpecStatePassed { + node.outcome, node.failure = outcome, failure + } + } + + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedAfterSuiteNode) Passed() bool { + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary { + return &types.SetupSummary{ + ComponentType: node.runnerA.nodeType, + CodeLocation: node.runnerA.codeLocation, + State: node.outcome, + RunTime: node.runTime, + Failure: node.failure, + } +} + +func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) { + for { + if node.canRun(syncHost) { + return + } + + time.Sleep(50 * time.Millisecond) + } +} + +func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool { + resp, err := http.Get(syncHost + "/RemoteAfterSuiteData") + if err != nil || resp.StatusCode != http.StatusOK { + return false + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return false + } + resp.Body.Close() + + afterSuiteData := types.RemoteAfterSuiteData{} + err = json.Unmarshal(body, &afterSuiteData) + if err != nil { + return false + } + + return afterSuiteData.CanRun +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node_test.go new file mode 100644 index 00000000000..4266a4bce6c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node_test.go @@ -0,0 +1,196 @@ +package leafnodes_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/types" + . 
"github.com/onsi/gomega" + "sync" + + "github.com/onsi/gomega/ghttp" + "net/http" + + "github.com/onsi/ginkgo/internal/codelocation" + Failer "github.com/onsi/ginkgo/internal/failer" + "time" +) + +var _ = Describe("SynchronizedAfterSuiteNode", func() { + var failer *Failer.Failer + var node SuiteNode + var codeLocation types.CodeLocation + var innerCodeLocation types.CodeLocation + var outcome bool + var server *ghttp.Server + var things []string + var lock *sync.Mutex + + BeforeEach(func() { + things = []string{} + server = ghttp.NewServer() + codeLocation = codelocation.New(0) + innerCodeLocation = codelocation.New(0) + failer = Failer.New() + lock = &sync.Mutex{} + }) + + AfterEach(func() { + server.Close() + }) + + newNode := func(bodyA interface{}, bodyB interface{}) SuiteNode { + return NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, time.Millisecond, failer) + } + + ranThing := func(thing string) { + lock.Lock() + defer lock.Unlock() + things = append(things, thing) + } + + thingsThatRan := func() []string { + lock.Lock() + defer lock.Unlock() + return things + } + + Context("when not running in parallel", func() { + Context("when all is well", func() { + BeforeEach(func() { + node = newNode(func() { + ranThing("A") + }, func() { + ranThing("B") + }) + + outcome = node.Run(1, 1, server.URL()) + }) + + It("should run A, then B", func() { + Ω(thingsThatRan()).Should(Equal([]string{"A", "B"})) + }) + + It("should report success", func() { + Ω(outcome).Should(BeTrue()) + Ω(node.Passed()).Should(BeTrue()) + Ω(node.Summary().State).Should(Equal(types.SpecStatePassed)) + }) + }) + + Context("when A fails", func() { + BeforeEach(func() { + node = newNode(func() { + ranThing("A") + failer.Fail("bam", innerCodeLocation) + }, func() { + ranThing("B") + }) + + outcome = node.Run(1, 1, server.URL()) + }) + + It("should still run B", func() { + Ω(thingsThatRan()).Should(Equal([]string{"A", "B"})) + }) + + It("should report failure", func() { + Ω(outcome).Should(BeFalse()) + Ω(node.Passed()).Should(BeFalse()) + Ω(node.Summary().State).Should(Equal(types.SpecStateFailed)) + }) + }) + + Context("when B fails", func() { + BeforeEach(func() { + node = newNode(func() { + ranThing("A") + }, func() { + ranThing("B") + failer.Fail("bam", innerCodeLocation) + }) + + outcome = node.Run(1, 1, server.URL()) + }) + + It("should run all the things", func() { + Ω(thingsThatRan()).Should(Equal([]string{"A", "B"})) + }) + + It("should report failure", func() { + Ω(outcome).Should(BeFalse()) + Ω(node.Passed()).Should(BeFalse()) + Ω(node.Summary().State).Should(Equal(types.SpecStateFailed)) + }) + }) + }) + + Context("when running in parallel", func() { + Context("as the first node", func() { + BeforeEach(func() { + server.AppendHandlers(ghttp.CombineHandlers( + ghttp.VerifyRequest("GET", "/RemoteAfterSuiteData"), + func(writer http.ResponseWriter, request *http.Request) { + ranThing("Request1") + }, + ghttp.RespondWithJSONEncoded(200, types.RemoteAfterSuiteData{false}), + ), ghttp.CombineHandlers( + ghttp.VerifyRequest("GET", "/RemoteAfterSuiteData"), + func(writer http.ResponseWriter, request *http.Request) { + ranThing("Request2") + }, + ghttp.RespondWithJSONEncoded(200, types.RemoteAfterSuiteData{false}), + ), ghttp.CombineHandlers( + ghttp.VerifyRequest("GET", "/RemoteAfterSuiteData"), + func(writer http.ResponseWriter, request *http.Request) { + ranThing("Request3") + }, + ghttp.RespondWithJSONEncoded(200, types.RemoteAfterSuiteData{true}), + )) + + node = newNode(func() { + ranThing("A") + }, 
func() { + ranThing("B") + }) + + outcome = node.Run(1, 3, server.URL()) + }) + + It("should run A and, when the server says its time, run B", func() { + Ω(thingsThatRan()).Should(Equal([]string{"A", "Request1", "Request2", "Request3", "B"})) + }) + + It("should report success", func() { + Ω(outcome).Should(BeTrue()) + Ω(node.Passed()).Should(BeTrue()) + Ω(node.Summary().State).Should(Equal(types.SpecStatePassed)) + }) + }) + + Context("as any other node", func() { + BeforeEach(func() { + node = newNode(func() { + ranThing("A") + }, func() { + ranThing("B") + }) + + outcome = node.Run(2, 3, server.URL()) + }) + + It("should run A, and not run B", func() { + Ω(thingsThatRan()).Should(Equal([]string{"A"})) + }) + + It("should not talk to the server", func() { + Ω(server.ReceivedRequests()).Should(BeEmpty()) + }) + + It("should report success", func() { + Ω(outcome).Should(BeTrue()) + Ω(node.Passed()).Should(BeTrue()) + Ω(node.Summary().State).Should(Equal(types.SpecStatePassed)) + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go new file mode 100644 index 00000000000..76a9679813f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go @@ -0,0 +1,182 @@ +package leafnodes + +import ( + "bytes" + "encoding/json" + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" + "io/ioutil" + "net/http" + "reflect" + "time" +) + +type synchronizedBeforeSuiteNode struct { + runnerA *runner + runnerB *runner + + data []byte + + outcome types.SpecState + failure types.SpecFailure + runTime time.Duration +} + +func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + node := &synchronizedBeforeSuiteNode{} + + node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0) + node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0) + + return node +} + +func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { + t := time.Now() + defer func() { + node.runTime = time.Since(t) + }() + + if parallelNode == 1 { + node.outcome, node.failure = node.runA(parallelTotal, syncHost) + } else { + node.outcome, node.failure = node.waitForA(syncHost) + } + + if node.outcome != types.SpecStatePassed { + return false + } + node.outcome, node.failure = node.runnerB.run() + + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) { + outcome, failure := node.runnerA.run() + + if parallelTotal > 1 { + state := types.RemoteBeforeSuiteStatePassed + if outcome != types.SpecStatePassed { + state = types.RemoteBeforeSuiteStateFailed + } + json := (types.RemoteBeforeSuiteData{ + Data: node.data, + State: state, + }).ToJSON() + http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json)) + } + + return outcome, failure +} + +func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) { + failure := func(message string) types.SpecFailure { + return types.SpecFailure{ + Message: message, + Location: node.runnerA.codeLocation, + ComponentType: node.runnerA.nodeType, + ComponentIndex: 
node.runnerA.componentIndex, + ComponentCodeLocation: node.runnerA.codeLocation, + } + } + for { + resp, err := http.Get(syncHost + "/BeforeSuiteState") + if err != nil || resp.StatusCode != http.StatusOK { + return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state") + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return types.SpecStateFailed, failure("Failed to read BeforeSuite state") + } + resp.Body.Close() + + beforeSuiteData := types.RemoteBeforeSuiteData{} + err = json.Unmarshal(body, &beforeSuiteData) + if err != nil { + return types.SpecStateFailed, failure("Failed to decode BeforeSuite state") + } + + switch beforeSuiteData.State { + case types.RemoteBeforeSuiteStatePassed: + node.data = beforeSuiteData.Data + return types.SpecStatePassed, types.SpecFailure{} + case types.RemoteBeforeSuiteStateFailed: + return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed") + case types.RemoteBeforeSuiteStateDisappeared: + return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite") + } + + time.Sleep(50 * time.Millisecond) + } + + return types.SpecStateFailed, failure("Shouldn't get here!") +} + +func (node *synchronizedBeforeSuiteNode) Passed() bool { + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary { + return &types.SetupSummary{ + ComponentType: node.runnerA.nodeType, + CodeLocation: node.runnerA.codeLocation, + State: node.outcome, + RunTime: node.runTime, + Failure: node.failure, + } +} + +func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} { + typeA := reflect.TypeOf(bodyA) + if typeA.Kind() != reflect.Func { + panic("SynchronizedBeforeSuite expects a function as its first argument") + } + + takesNothing := typeA.NumIn() == 0 + takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface + returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8 + + if !((takesNothing || takesADoneChannel) && returnsBytes) { + panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.") + } + + if takesADoneChannel { + return func(done chan<- interface{}) { + out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)}) + node.data = out[0].Interface().([]byte) + } + } + + return func() { + out := reflect.ValueOf(bodyA).Call([]reflect.Value{}) + node.data = out[0].Interface().([]byte) + } +} + +func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} { + typeB := reflect.TypeOf(bodyB) + if typeB.Kind() != reflect.Func { + panic("SynchronizedBeforeSuite expects a function as its second argument") + } + + returnsNothing := typeB.NumOut() == 0 + takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 + takesBytesAndDone := typeB.NumIn() == 2 && + typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 && + typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface + + if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) { + panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)") + } + + if takesBytesAndDone { + return func(done chan<- interface{}) { + 
reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)}) + } + } + + return func() { + reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)}) + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node_test.go new file mode 100644 index 00000000000..dbf2426748a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node_test.go @@ -0,0 +1,445 @@ +package leafnodes_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/internal/leafnodes" + . "github.com/onsi/gomega" + + "github.com/onsi/gomega/ghttp" + "net/http" + + "github.com/onsi/ginkgo/internal/codelocation" + Failer "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" + "time" +) + +var _ = Describe("SynchronizedBeforeSuiteNode", func() { + var failer *Failer.Failer + var node SuiteNode + var codeLocation types.CodeLocation + var innerCodeLocation types.CodeLocation + var outcome bool + var server *ghttp.Server + + BeforeEach(func() { + server = ghttp.NewServer() + codeLocation = codelocation.New(0) + innerCodeLocation = codelocation.New(0) + failer = Failer.New() + }) + + AfterEach(func() { + server.Close() + }) + + newNode := func(bodyA interface{}, bodyB interface{}) SuiteNode { + return NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, time.Millisecond, failer) + } + + Describe("when not running in parallel", func() { + Context("when all is well", func() { + var data []byte + BeforeEach(func() { + data = nil + + node = newNode(func() []byte { + return []byte("my data") + }, func(d []byte) { + data = d + }) + + outcome = node.Run(1, 1, server.URL()) + }) + + It("should run A, then B passing the output from A to B", func() { + Ω(data).Should(Equal([]byte("my data"))) + }) + + It("should report success", func() { + Ω(outcome).Should(BeTrue()) + Ω(node.Passed()).Should(BeTrue()) + Ω(node.Summary().State).Should(Equal(types.SpecStatePassed)) + }) + }) + + Context("when A fails", func() { + var ranB bool + BeforeEach(func() { + ranB = false + node = newNode(func() []byte { + failer.Fail("boom", innerCodeLocation) + return nil + }, func([]byte) { + ranB = true + }) + + outcome = node.Run(1, 1, server.URL()) + }) + + It("should not run B", func() { + Ω(ranB).Should(BeFalse()) + }) + + It("should report failure", func() { + Ω(outcome).Should(BeFalse()) + Ω(node.Passed()).Should(BeFalse()) + Ω(node.Summary().State).Should(Equal(types.SpecStateFailed)) + }) + }) + + Context("when B fails", func() { + BeforeEach(func() { + node = newNode(func() []byte { + return nil + }, func([]byte) { + failer.Fail("boom", innerCodeLocation) + }) + + outcome = node.Run(1, 1, server.URL()) + }) + + It("should report failure", func() { + Ω(outcome).Should(BeFalse()) + Ω(node.Passed()).Should(BeFalse()) + Ω(node.Summary().State).Should(Equal(types.SpecStateFailed)) + }) + }) + + Context("when A times out", func() { + var ranB bool + BeforeEach(func() { + ranB = false + node = newNode(func(Done) []byte { + time.Sleep(time.Second) + return nil + }, func([]byte) { + ranB = true + }) + + outcome = node.Run(1, 1, server.URL()) + }) + + It("should not run B", func() { + Ω(ranB).Should(BeFalse()) + }) + + It("should report failure", func() { + Ω(outcome).Should(BeFalse()) + Ω(node.Passed()).Should(BeFalse()) + Ω(node.Summary().State).Should(Equal(types.SpecStateTimedOut)) + }) + }) + + 
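+		// Note: the newNode helper above constructs the suite node with a
+		// one-millisecond timeout, so any body that accepts a Done channel and
+		// then sleeps for a full second is reported as SpecStateTimedOut. In
+		// user-facing code the construct under test corresponds roughly to the
+		// following sketch (names and payload are illustrative assumptions):
+		//
+		//	var _ = SynchronizedBeforeSuite(func() []byte {
+		//		return []byte("shared fixture") // runs on node 1 only
+		//	}, func(data []byte) {
+		//		// runs on every node once node 1 has finished
+		//	})
+		//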
Context("when B times out", func() { + BeforeEach(func() { + node = newNode(func() []byte { + return nil + }, func([]byte, Done) { + time.Sleep(time.Second) + }) + + outcome = node.Run(1, 1, server.URL()) + }) + + It("should report failure", func() { + Ω(outcome).Should(BeFalse()) + Ω(node.Passed()).Should(BeFalse()) + Ω(node.Summary().State).Should(Equal(types.SpecStateTimedOut)) + }) + }) + }) + + Describe("when running in parallel", func() { + var ranB bool + var parallelNode, parallelTotal int + BeforeEach(func() { + ranB = false + parallelNode, parallelTotal = 1, 3 + }) + + Context("as the first node, it runs A", func() { + var expectedState types.RemoteBeforeSuiteData + + BeforeEach(func() { + parallelNode, parallelTotal = 1, 3 + }) + + JustBeforeEach(func() { + server.AppendHandlers(ghttp.CombineHandlers( + ghttp.VerifyRequest("POST", "/BeforeSuiteState"), + ghttp.VerifyJSONRepresenting(expectedState), + )) + + outcome = node.Run(parallelNode, parallelTotal, server.URL()) + }) + + Context("when A succeeds", func() { + BeforeEach(func() { + expectedState = types.RemoteBeforeSuiteData{[]byte("my data"), types.RemoteBeforeSuiteStatePassed} + + node = newNode(func() []byte { + return []byte("my data") + }, func([]byte) { + ranB = true + }) + }) + + It("should post about A succeeding", func() { + Ω(server.ReceivedRequests()).Should(HaveLen(1)) + }) + + It("should run B", func() { + Ω(ranB).Should(BeTrue()) + }) + + It("should report success", func() { + Ω(outcome).Should(BeTrue()) + }) + }) + + Context("when A fails", func() { + BeforeEach(func() { + expectedState = types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStateFailed} + + node = newNode(func() []byte { + panic("BAM") + return []byte("my data") + }, func([]byte) { + ranB = true + }) + }) + + It("should post about A failing", func() { + Ω(server.ReceivedRequests()).Should(HaveLen(1)) + }) + + It("should not run B", func() { + Ω(ranB).Should(BeFalse()) + }) + + It("should report failure", func() { + Ω(outcome).Should(BeFalse()) + }) + }) + }) + + Context("as the Nth node", func() { + var statusCode int + var response interface{} + var ranA bool + var bData []byte + + BeforeEach(func() { + ranA = false + bData = nil + + statusCode = http.StatusOK + + server.AppendHandlers(ghttp.CombineHandlers( + ghttp.VerifyRequest("GET", "/BeforeSuiteState"), + ghttp.RespondWith(http.StatusOK, string((types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending}).ToJSON())), + ), ghttp.CombineHandlers( + ghttp.VerifyRequest("GET", "/BeforeSuiteState"), + ghttp.RespondWith(http.StatusOK, string((types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending}).ToJSON())), + ), ghttp.CombineHandlers( + ghttp.VerifyRequest("GET", "/BeforeSuiteState"), + ghttp.RespondWithJSONEncodedPtr(&statusCode, &response), + )) + + node = newNode(func() []byte { + ranA = true + return nil + }, func(data []byte) { + bData = data + }) + + parallelNode, parallelTotal = 2, 3 + }) + + Context("when A on node1 succeeds", func() { + BeforeEach(func() { + response = types.RemoteBeforeSuiteData{[]byte("my data"), types.RemoteBeforeSuiteStatePassed} + outcome = node.Run(parallelNode, parallelTotal, server.URL()) + }) + + It("should not run A", func() { + Ω(ranA).Should(BeFalse()) + }) + + It("should poll for A", func() { + Ω(server.ReceivedRequests()).Should(HaveLen(3)) + }) + + It("should run B when the polling succeeds", func() { + Ω(bData).Should(Equal([]byte("my data"))) + }) + + It("should succeed", func() { + Ω(outcome).Should(BeTrue()) + 
Ω(node.Passed()).Should(BeTrue()) + }) + }) + + Context("when A on node1 fails", func() { + BeforeEach(func() { + response = types.RemoteBeforeSuiteData{[]byte("my data"), types.RemoteBeforeSuiteStateFailed} + outcome = node.Run(parallelNode, parallelTotal, server.URL()) + }) + + It("should not run A", func() { + Ω(ranA).Should(BeFalse()) + }) + + It("should poll for A", func() { + Ω(server.ReceivedRequests()).Should(HaveLen(3)) + }) + + It("should not run B", func() { + Ω(bData).Should(BeNil()) + }) + + It("should fail", func() { + Ω(outcome).Should(BeFalse()) + Ω(node.Passed()).Should(BeFalse()) + + summary := node.Summary() + Ω(summary.State).Should(Equal(types.SpecStateFailed)) + Ω(summary.Failure.Message).Should(Equal("BeforeSuite on Node 1 failed")) + Ω(summary.Failure.Location).Should(Equal(codeLocation)) + Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite)) + Ω(summary.Failure.ComponentIndex).Should(Equal(0)) + Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation)) + }) + }) + + Context("when node1 disappears", func() { + BeforeEach(func() { + response = types.RemoteBeforeSuiteData{[]byte("my data"), types.RemoteBeforeSuiteStateDisappeared} + outcome = node.Run(parallelNode, parallelTotal, server.URL()) + }) + + It("should not run A", func() { + Ω(ranA).Should(BeFalse()) + }) + + It("should poll for A", func() { + Ω(server.ReceivedRequests()).Should(HaveLen(3)) + }) + + It("should not run B", func() { + Ω(bData).Should(BeNil()) + }) + + It("should fail", func() { + Ω(outcome).Should(BeFalse()) + Ω(node.Passed()).Should(BeFalse()) + + summary := node.Summary() + Ω(summary.State).Should(Equal(types.SpecStateFailed)) + Ω(summary.Failure.Message).Should(Equal("Node 1 disappeared before completing BeforeSuite")) + Ω(summary.Failure.Location).Should(Equal(codeLocation)) + Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite)) + Ω(summary.Failure.ComponentIndex).Should(Equal(0)) + Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation)) + }) + }) + }) + }) + + Describe("construction", func() { + Describe("the first function", func() { + Context("when the first function returns a byte array", func() { + Context("and takes nothing", func() { + It("should be fine", func() { + Ω(func() { + newNode(func() []byte { return nil }, func([]byte) {}) + }).ShouldNot(Panic()) + }) + }) + + Context("and takes a done function", func() { + It("should be fine", func() { + Ω(func() { + newNode(func(Done) []byte { return nil }, func([]byte) {}) + }).ShouldNot(Panic()) + }) + }) + + Context("and takes more than one thing", func() { + It("should panic", func() { + Ω(func() { + newNode(func(Done, Done) []byte { return nil }, func([]byte) {}) + }).Should(Panic()) + }) + }) + + Context("and takes something else", func() { + It("should panic", func() { + Ω(func() { + newNode(func(bool) []byte { return nil }, func([]byte) {}) + }).Should(Panic()) + }) + }) + }) + + Context("when the first function does not return a byte array", func() { + It("should panic", func() { + Ω(func() { + newNode(func() {}, func([]byte) {}) + }).Should(Panic()) + + Ω(func() { + newNode(func() []int { return nil }, func([]byte) {}) + }).Should(Panic()) + }) + }) + }) + + Describe("the second function", func() { + Context("when the second function takes a byte array", func() { + It("should be fine", func() { + Ω(func() { + newNode(func() []byte { return nil }, func([]byte) {}) + }).ShouldNot(Panic()) + }) + }) + + Context("when it also takes a done 
channel", func() { + It("should be fine", func() { + Ω(func() { + newNode(func() []byte { return nil }, func([]byte, Done) {}) + }).ShouldNot(Panic()) + }) + }) + + Context("if it takes anything else", func() { + It("should panic", func() { + Ω(func() { + newNode(func() []byte { return nil }, func([]byte, chan bool) {}) + }).Should(Panic()) + + Ω(func() { + newNode(func() []byte { return nil }, func(string) {}) + }).Should(Panic()) + }) + }) + + Context("if it takes nothing at all", func() { + It("should panic", func() { + Ω(func() { + newNode(func() []byte { return nil }, func() {}) + }).Should(Panic()) + }) + }) + + Context("if it returns something", func() { + It("should panic", func() { + Ω(func() { + newNode(func() []byte { return nil }, func([]byte) []byte { return nil }) + }).Should(Panic()) + }) + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go new file mode 100644 index 00000000000..522d44e3573 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go @@ -0,0 +1,249 @@ +/* + +Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output +coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel: + + ginkgo -nodes=N + +where N is the number of nodes you desire. +*/ +package remote + +import ( + "time" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters/stenographer" + "github.com/onsi/ginkgo/types" +) + +type configAndSuite struct { + config config.GinkgoConfigType + summary *types.SuiteSummary +} + +type Aggregator struct { + nodeCount int + config config.DefaultReporterConfigType + stenographer stenographer.Stenographer + result chan bool + + suiteBeginnings chan configAndSuite + aggregatedSuiteBeginnings []configAndSuite + + beforeSuites chan *types.SetupSummary + aggregatedBeforeSuites []*types.SetupSummary + + afterSuites chan *types.SetupSummary + aggregatedAfterSuites []*types.SetupSummary + + specCompletions chan *types.SpecSummary + completedSpecs []*types.SpecSummary + + suiteEndings chan *types.SuiteSummary + aggregatedSuiteEndings []*types.SuiteSummary + specs []*types.SpecSummary + + startTime time.Time +} + +func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator { + aggregator := &Aggregator{ + nodeCount: nodeCount, + result: result, + config: config, + stenographer: stenographer, + + suiteBeginnings: make(chan configAndSuite, 0), + beforeSuites: make(chan *types.SetupSummary, 0), + afterSuites: make(chan *types.SetupSummary, 0), + specCompletions: make(chan *types.SpecSummary, 0), + suiteEndings: make(chan *types.SuiteSummary, 0), + } + + go aggregator.mux() + + return aggregator +} + +func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + aggregator.suiteBeginnings <- configAndSuite{config, summary} +} + +func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + aggregator.beforeSuites <- setupSummary +} + +func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + aggregator.afterSuites <- setupSummary +} + +func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) { + //noop +} + +func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) { + aggregator.specCompletions <- specSummary +} + +func (aggregator 
*Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) { + aggregator.suiteEndings <- summary +} + +func (aggregator *Aggregator) mux() { +loop: + for { + select { + case configAndSuite := <-aggregator.suiteBeginnings: + aggregator.registerSuiteBeginning(configAndSuite) + case setupSummary := <-aggregator.beforeSuites: + aggregator.registerBeforeSuite(setupSummary) + case setupSummary := <-aggregator.afterSuites: + aggregator.registerAfterSuite(setupSummary) + case specSummary := <-aggregator.specCompletions: + aggregator.registerSpecCompletion(specSummary) + case suite := <-aggregator.suiteEndings: + finished, passed := aggregator.registerSuiteEnding(suite) + if finished { + aggregator.result <- passed + break loop + } + } + } +} + +func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) { + aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite) + + if len(aggregator.aggregatedSuiteBeginnings) == 1 { + aggregator.startTime = time.Now() + } + + if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount { + return + } + + aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct) + + totalNumberOfSpecs := 0 + if len(aggregator.aggregatedSuiteBeginnings) > 0 { + totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization + } + + aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct) + aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) { + aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) { + aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) { + aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary) + aggregator.specs = append(aggregator.specs, specSummary) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) flushCompletedSpecs() { + if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount { + return + } + + for _, setupSummary := range aggregator.aggregatedBeforeSuites { + aggregator.announceBeforeSuite(setupSummary) + } + + for _, specSummary := range aggregator.completedSpecs { + aggregator.announceSpec(specSummary) + } + + for _, setupSummary := range aggregator.aggregatedAfterSuites { + aggregator.announceAfterSuite(setupSummary) + } + + aggregator.aggregatedBeforeSuites = []*types.SetupSummary{} + aggregator.completedSpecs = []*types.SpecSummary{} + aggregator.aggregatedAfterSuites = []*types.SetupSummary{} +} + +func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) { + aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput) + if setupSummary.State != types.SpecStatePassed { + aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + } +} + +func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) { + 
aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput) + if setupSummary.State != types.SpecStatePassed { + aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + } +} + +func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) { + if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped { + aggregator.stenographer.AnnounceSpecWillRun(specSummary) + } + + aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput) + + switch specSummary.State { + case types.SpecStatePassed: + if specSummary.IsMeasurement { + aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct) + } else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold { + aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct) + } else { + aggregator.stenographer.AnnounceSuccesfulSpec(specSummary) + } + + case types.SpecStatePending: + aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct) + case types.SpecStateSkipped: + aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + case types.SpecStateTimedOut: + aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + case types.SpecStatePanicked: + aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + case types.SpecStateFailed: + aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + } +} + +func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) { + aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite) + if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount { + return false, false + } + + aggregatedSuiteSummary := &types.SuiteSummary{} + aggregatedSuiteSummary.SuiteSucceeded = true + + for _, suiteSummary := range aggregator.aggregatedSuiteEndings { + if suiteSummary.SuiteSucceeded == false { + aggregatedSuiteSummary.SuiteSucceeded = false + } + + aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun + aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs + aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs + aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs + aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs + aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs + aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs + } + + aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime) + + aggregator.stenographer.SummarizeFailures(aggregator.specs) + aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct) + + return true, aggregatedSuiteSummary.SuiteSucceeded +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator_test.go new file mode 100644 index 00000000000..aedf9392789 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator_test.go @@ -0,0 +1,315 @@ +package 
remote_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "time" + + "github.com/onsi/ginkgo/config" + . "github.com/onsi/ginkgo/internal/remote" + st "github.com/onsi/ginkgo/reporters/stenographer" + "github.com/onsi/ginkgo/types" +) + +var _ = Describe("Aggregator", func() { + var ( + aggregator *Aggregator + reporterConfig config.DefaultReporterConfigType + stenographer *st.FakeStenographer + result chan bool + + ginkgoConfig1 config.GinkgoConfigType + ginkgoConfig2 config.GinkgoConfigType + + suiteSummary1 *types.SuiteSummary + suiteSummary2 *types.SuiteSummary + + beforeSummary *types.SetupSummary + afterSummary *types.SetupSummary + specSummary *types.SpecSummary + + suiteDescription string + ) + + BeforeEach(func() { + reporterConfig = config.DefaultReporterConfigType{ + NoColor: false, + SlowSpecThreshold: 0.1, + NoisyPendings: true, + Succinct: false, + Verbose: true, + } + stenographer = st.NewFakeStenographer() + result = make(chan bool, 1) + aggregator = NewAggregator(2, result, reporterConfig, stenographer) + + // + // now set up some fixture data + // + + ginkgoConfig1 = config.GinkgoConfigType{ + RandomSeed: 1138, + RandomizeAllSpecs: true, + ParallelNode: 1, + ParallelTotal: 2, + } + + ginkgoConfig2 = config.GinkgoConfigType{ + RandomSeed: 1138, + RandomizeAllSpecs: true, + ParallelNode: 2, + ParallelTotal: 2, + } + + suiteDescription = "My Parallel Suite" + + suiteSummary1 = &types.SuiteSummary{ + SuiteDescription: suiteDescription, + + NumberOfSpecsBeforeParallelization: 30, + NumberOfTotalSpecs: 17, + NumberOfSpecsThatWillBeRun: 15, + NumberOfPendingSpecs: 1, + NumberOfSkippedSpecs: 1, + } + + suiteSummary2 = &types.SuiteSummary{ + SuiteDescription: suiteDescription, + + NumberOfSpecsBeforeParallelization: 30, + NumberOfTotalSpecs: 13, + NumberOfSpecsThatWillBeRun: 8, + NumberOfPendingSpecs: 2, + NumberOfSkippedSpecs: 3, + } + + beforeSummary = &types.SetupSummary{ + State: types.SpecStatePassed, + CapturedOutput: "BeforeSuiteOutput", + } + + afterSummary = &types.SetupSummary{ + State: types.SpecStatePassed, + CapturedOutput: "AfterSuiteOutput", + } + + specSummary = &types.SpecSummary{ + State: types.SpecStatePassed, + CapturedOutput: "SpecOutput", + } + }) + + call := func(method string, args ...interface{}) st.FakeStenographerCall { + return st.NewFakeStenographerCall(method, args...) 
+ } + + beginSuite := func() { + stenographer.Reset() + aggregator.SpecSuiteWillBegin(ginkgoConfig2, suiteSummary2) + aggregator.SpecSuiteWillBegin(ginkgoConfig1, suiteSummary1) + Eventually(func() interface{} { + return len(stenographer.Calls()) + }).Should(BeNumerically(">=", 3)) + } + + Describe("Announcing the beginning of the suite", func() { + Context("When one of the parallel-suites starts", func() { + BeforeEach(func() { + aggregator.SpecSuiteWillBegin(ginkgoConfig2, suiteSummary2) + }) + + It("should be silent", func() { + Consistently(func() interface{} { return stenographer.Calls() }).Should(BeEmpty()) + }) + }) + + Context("once all of the parallel-suites have started", func() { + BeforeEach(func() { + aggregator.SpecSuiteWillBegin(ginkgoConfig2, suiteSummary2) + aggregator.SpecSuiteWillBegin(ginkgoConfig1, suiteSummary1) + Eventually(func() interface{} { + return stenographer.Calls() + }).Should(HaveLen(3)) + }) + + It("should announce the beginning of the suite", func() { + Ω(stenographer.Calls()).Should(HaveLen(3)) + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuite", suiteDescription, ginkgoConfig1.RandomSeed, true, false))) + Ω(stenographer.Calls()[1]).Should(Equal(call("AnnounceTotalNumberOfSpecs", 30, false))) + Ω(stenographer.Calls()[2]).Should(Equal(call("AnnounceAggregatedParallelRun", 2, false))) + }) + }) + }) + + Describe("Announcing specs and before suites", func() { + Context("when the parallel-suites have not all started", func() { + BeforeEach(func() { + aggregator.BeforeSuiteDidRun(beforeSummary) + aggregator.AfterSuiteDidRun(afterSummary) + aggregator.SpecDidComplete(specSummary) + }) + + It("should not announce any specs", func() { + Consistently(func() interface{} { return stenographer.Calls() }).Should(BeEmpty()) + }) + + Context("when the parallel-suites subsequently start", func() { + BeforeEach(func() { + beginSuite() + }) + + It("should announce the specs, the before suites and the after suites", func() { + Eventually(func() interface{} { + return stenographer.Calls() + }).Should(ContainElement(call("AnnounceSuccesfulSpec", specSummary))) + + Ω(stenographer.Calls()).Should(ContainElement(call("AnnounceCapturedOutput", beforeSummary.CapturedOutput))) + Ω(stenographer.Calls()).Should(ContainElement(call("AnnounceCapturedOutput", afterSummary.CapturedOutput))) + }) + }) + }) + + Context("When the parallel-suites have all started", func() { + BeforeEach(func() { + beginSuite() + stenographer.Reset() + }) + + Context("When a spec completes", func() { + BeforeEach(func() { + aggregator.BeforeSuiteDidRun(beforeSummary) + aggregator.SpecDidComplete(specSummary) + aggregator.AfterSuiteDidRun(afterSummary) + Eventually(func() interface{} { + return stenographer.Calls() + }).Should(HaveLen(5)) + }) + + It("should announce the captured output of the BeforeSuite", func() { + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceCapturedOutput", beforeSummary.CapturedOutput))) + }) + + It("should announce that the spec will run (when in verbose mode)", func() { + Ω(stenographer.Calls()[1]).Should(Equal(call("AnnounceSpecWillRun", specSummary))) + }) + + It("should announce the captured stdout of the spec", func() { + Ω(stenographer.Calls()[2]).Should(Equal(call("AnnounceCapturedOutput", specSummary.CapturedOutput))) + }) + + It("should announce completion", func() { + Ω(stenographer.Calls()[3]).Should(Equal(call("AnnounceSuccesfulSpec", specSummary))) + }) + + It("should announce the captured output of the AfterSuite", func() { + 
Ω(stenographer.Calls()[4]).Should(Equal(call("AnnounceCapturedOutput", afterSummary.CapturedOutput))) + }) + }) + }) + }) + + Describe("Announcing the end of the suite", func() { + BeforeEach(func() { + beginSuite() + stenographer.Reset() + }) + + Context("When one of the parallel-suites ends", func() { + BeforeEach(func() { + aggregator.SpecSuiteDidEnd(suiteSummary2) + }) + + It("should be silent", func() { + Consistently(func() interface{} { return stenographer.Calls() }).Should(BeEmpty()) + }) + + It("should not notify the channel", func() { + Ω(result).Should(BeEmpty()) + }) + }) + + Context("once all of the parallel-suites end", func() { + BeforeEach(func() { + time.Sleep(200 * time.Millisecond) + + suiteSummary1.SuiteSucceeded = true + suiteSummary1.NumberOfPassedSpecs = 15 + suiteSummary1.NumberOfFailedSpecs = 0 + suiteSummary1.NumberOfFlakedSpecs = 3 + suiteSummary2.SuiteSucceeded = false + suiteSummary2.NumberOfPassedSpecs = 5 + suiteSummary2.NumberOfFailedSpecs = 3 + suiteSummary2.NumberOfFlakedSpecs = 4 + + aggregator.SpecSuiteDidEnd(suiteSummary2) + aggregator.SpecSuiteDidEnd(suiteSummary1) + Eventually(func() interface{} { + return stenographer.Calls() + }).Should(HaveLen(2)) + }) + + It("should announce the end of the suite", func() { + compositeSummary := stenographer.Calls()[1].Args[0].(*types.SuiteSummary) + + Ω(compositeSummary.SuiteSucceeded).Should(BeFalse()) + Ω(compositeSummary.NumberOfSpecsThatWillBeRun).Should(Equal(23)) + Ω(compositeSummary.NumberOfTotalSpecs).Should(Equal(30)) + Ω(compositeSummary.NumberOfPassedSpecs).Should(Equal(20)) + Ω(compositeSummary.NumberOfFailedSpecs).Should(Equal(3)) + Ω(compositeSummary.NumberOfPendingSpecs).Should(Equal(3)) + Ω(compositeSummary.NumberOfSkippedSpecs).Should(Equal(4)) + Ω(compositeSummary.NumberOfFlakedSpecs).Should(Equal(7)) + Ω(compositeSummary.RunTime.Seconds()).Should(BeNumerically(">", 0.2)) + }) + }) + + Context("when all the parallel-suites pass", func() { + BeforeEach(func() { + suiteSummary1.SuiteSucceeded = true + suiteSummary2.SuiteSucceeded = true + + aggregator.SpecSuiteDidEnd(suiteSummary2) + aggregator.SpecSuiteDidEnd(suiteSummary1) + Eventually(func() interface{} { + return stenographer.Calls() + }).Should(HaveLen(2)) + }) + + It("should report success", func() { + compositeSummary := stenographer.Calls()[1].Args[0].(*types.SuiteSummary) + + Ω(compositeSummary.SuiteSucceeded).Should(BeTrue()) + }) + + It("should notify the channel that it succeded", func(done Done) { + Ω(<-result).Should(BeTrue()) + close(done) + }) + }) + + Context("when one of the parallel-suites fails", func() { + BeforeEach(func() { + suiteSummary1.SuiteSucceeded = true + suiteSummary2.SuiteSucceeded = false + + aggregator.SpecSuiteDidEnd(suiteSummary2) + aggregator.SpecSuiteDidEnd(suiteSummary1) + Eventually(func() interface{} { + return stenographer.Calls() + }).Should(HaveLen(2)) + }) + + It("should report failure", func() { + compositeSummary := stenographer.Calls()[1].Args[0].(*types.SuiteSummary) + + Ω(compositeSummary.SuiteSucceeded).Should(BeFalse()) + }) + + It("should notify the channel that it failed", func(done Done) { + Ω(<-result).Should(BeFalse()) + close(done) + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/fake_output_interceptor_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/fake_output_interceptor_test.go new file mode 100644 index 00000000000..a928f93d311 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/fake_output_interceptor_test.go @@ -0,0 +1,17 @@ +package 
remote_test + +type fakeOutputInterceptor struct { + DidStartInterceptingOutput bool + DidStopInterceptingOutput bool + InterceptedOutput string +} + +func (interceptor *fakeOutputInterceptor) StartInterceptingOutput() error { + interceptor.DidStartInterceptingOutput = true + return nil +} + +func (interceptor *fakeOutputInterceptor) StopInterceptingAndReturnOutput() (string, error) { + interceptor.DidStopInterceptingOutput = true + return interceptor.InterceptedOutput, nil +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/fake_poster_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/fake_poster_test.go new file mode 100644 index 00000000000..3543c59c64c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/fake_poster_test.go @@ -0,0 +1,33 @@ +package remote_test + +import ( + "io" + "io/ioutil" + "net/http" +) + +type post struct { + url string + bodyType string + bodyContent []byte +} + +type fakePoster struct { + posts []post +} + +func newFakePoster() *fakePoster { + return &fakePoster{ + posts: make([]post, 0), + } +} + +func (poster *fakePoster) Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) { + bodyContent, _ := ioutil.ReadAll(body) + poster.posts = append(poster.posts, post{ + url: url, + bodyType: bodyType, + bodyContent: bodyContent, + }) + return nil, nil +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go new file mode 100644 index 00000000000..025eb506448 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go @@ -0,0 +1,90 @@ +package remote + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +//An interface to net/http's client to allow the injection of fakes under test +type Poster interface { + Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) +} + +/* +The ForwardingReporter is a Ginkgo reporter that forwards information to +a Ginkgo remote server. + +When streaming parallel test output, this repoter is automatically installed by Ginkgo. + +This is accomplished by passing in the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`, the Ginkgo test runner +detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter +in place of Ginkgo's DefaultReporter. 
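+
+As an illustrative example (not a complete reference): a parallel run started with
+something like `ginkgo -nodes=4` hands each node the server's address, and this
+reporter then POSTs JSON-encoded summaries to that server via the post helper
+below; the receiving side is the Server in this package.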
+*/ + +type ForwardingReporter struct { + serverHost string + poster Poster + outputInterceptor OutputInterceptor +} + +func NewForwardingReporter(serverHost string, poster Poster, outputInterceptor OutputInterceptor) *ForwardingReporter { + return &ForwardingReporter{ + serverHost: serverHost, + poster: poster, + outputInterceptor: outputInterceptor, + } +} + +func (reporter *ForwardingReporter) post(path string, data interface{}) { + encoded, _ := json.Marshal(data) + buffer := bytes.NewBuffer(encoded) + reporter.poster.Post(reporter.serverHost+path, "application/json", buffer) +} + +func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) { + data := struct { + Config config.GinkgoConfigType `json:"config"` + Summary *types.SuiteSummary `json:"suite-summary"` + }{ + conf, + summary, + } + + reporter.outputInterceptor.StartInterceptingOutput() + reporter.post("/SpecSuiteWillBegin", data) +} + +func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() + reporter.outputInterceptor.StartInterceptingOutput() + setupSummary.CapturedOutput = output + reporter.post("/BeforeSuiteDidRun", setupSummary) +} + +func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) { + reporter.post("/SpecWillRun", specSummary) +} + +func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) { + output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() + reporter.outputInterceptor.StartInterceptingOutput() + specSummary.CapturedOutput = output + reporter.post("/SpecDidComplete", specSummary) +} + +func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() + reporter.outputInterceptor.StartInterceptingOutput() + setupSummary.CapturedOutput = output + reporter.post("/AfterSuiteDidRun", setupSummary) +} + +func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + reporter.outputInterceptor.StopInterceptingAndReturnOutput() + reporter.post("/SpecSuiteDidEnd", summary) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go new file mode 100644 index 00000000000..e5f3b1e3071 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go @@ -0,0 +1,180 @@ +package remote_test + +import ( + "encoding/json" + . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/config" + . "github.com/onsi/ginkgo/internal/remote" + "github.com/onsi/ginkgo/types" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("ForwardingReporter", func() { + var ( + reporter *ForwardingReporter + interceptor *fakeOutputInterceptor + poster *fakePoster + suiteSummary *types.SuiteSummary + specSummary *types.SpecSummary + setupSummary *types.SetupSummary + serverHost string + ) + + BeforeEach(func() { + serverHost = "http://127.0.0.1:7788" + + poster = newFakePoster() + + interceptor = &fakeOutputInterceptor{ + InterceptedOutput: "The intercepted output!", + } + + reporter = NewForwardingReporter(serverHost, poster, interceptor) + + suiteSummary = &types.SuiteSummary{ + SuiteDescription: "My Test Suite", + } + + setupSummary = &types.SetupSummary{ + State: types.SpecStatePassed, + } + + specSummary = &types.SpecSummary{ + ComponentTexts: []string{"My", "Spec"}, + State: types.SpecStatePassed, + } + }) + + Context("When a suite begins", func() { + BeforeEach(func() { + reporter.SpecSuiteWillBegin(config.GinkgoConfig, suiteSummary) + }) + + It("should start intercepting output", func() { + Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue()) + }) + + It("should POST the SuiteSummary and Ginkgo Config to the Ginkgo server", func() { + Ω(poster.posts).Should(HaveLen(1)) + Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/SpecSuiteWillBegin")) + Ω(poster.posts[0].bodyType).Should(Equal("application/json")) + + var sentData struct { + SentConfig config.GinkgoConfigType `json:"config"` + SentSuiteSummary *types.SuiteSummary `json:"suite-summary"` + } + + err := json.Unmarshal(poster.posts[0].bodyContent, &sentData) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(sentData.SentConfig).Should(Equal(config.GinkgoConfig)) + Ω(sentData.SentSuiteSummary).Should(Equal(suiteSummary)) + }) + }) + + Context("when a BeforeSuite completes", func() { + BeforeEach(func() { + reporter.BeforeSuiteDidRun(setupSummary) + }) + + It("should stop, then start intercepting output", func() { + Ω(interceptor.DidStopInterceptingOutput).Should(BeTrue()) + Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue()) + }) + + It("should POST the SetupSummary to the Ginkgo server", func() { + Ω(poster.posts).Should(HaveLen(1)) + Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/BeforeSuiteDidRun")) + Ω(poster.posts[0].bodyType).Should(Equal("application/json")) + + var summary *types.SetupSummary + err := json.Unmarshal(poster.posts[0].bodyContent, &summary) + Ω(err).ShouldNot(HaveOccurred()) + setupSummary.CapturedOutput = interceptor.InterceptedOutput + Ω(summary).Should(Equal(setupSummary)) + }) + }) + + Context("when an AfterSuite completes", func() { + BeforeEach(func() { + reporter.AfterSuiteDidRun(setupSummary) + }) + + It("should stop, then start intercepting output", func() { + Ω(interceptor.DidStopInterceptingOutput).Should(BeTrue()) + Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue()) + }) + + It("should POST the SetupSummary to the Ginkgo server", func() { + Ω(poster.posts).Should(HaveLen(1)) + Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/AfterSuiteDidRun")) + Ω(poster.posts[0].bodyType).Should(Equal("application/json")) + + var summary *types.SetupSummary + err := json.Unmarshal(poster.posts[0].bodyContent, &summary) + Ω(err).ShouldNot(HaveOccurred()) + setupSummary.CapturedOutput = interceptor.InterceptedOutput + Ω(summary).Should(Equal(setupSummary)) + }) + }) + + Context("When a spec will run", func() { + BeforeEach(func() { + reporter.SpecWillRun(specSummary) + }) + + It("should POST the SpecSummary to the Ginkgo server", func() { + 
Ω(poster.posts).Should(HaveLen(1)) + Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/SpecWillRun")) + Ω(poster.posts[0].bodyType).Should(Equal("application/json")) + + var summary *types.SpecSummary + err := json.Unmarshal(poster.posts[0].bodyContent, &summary) + Ω(err).ShouldNot(HaveOccurred()) + Ω(summary).Should(Equal(specSummary)) + }) + + Context("When a spec completes", func() { + BeforeEach(func() { + specSummary.State = types.SpecStatePanicked + reporter.SpecDidComplete(specSummary) + }) + + It("should POST the SpecSummary to the Ginkgo server and include any intercepted output", func() { + Ω(poster.posts).Should(HaveLen(2)) + Ω(poster.posts[1].url).Should(Equal("http://127.0.0.1:7788/SpecDidComplete")) + Ω(poster.posts[1].bodyType).Should(Equal("application/json")) + + var summary *types.SpecSummary + err := json.Unmarshal(poster.posts[1].bodyContent, &summary) + Ω(err).ShouldNot(HaveOccurred()) + specSummary.CapturedOutput = interceptor.InterceptedOutput + Ω(summary).Should(Equal(specSummary)) + }) + + It("should stop, then start intercepting output", func() { + Ω(interceptor.DidStopInterceptingOutput).Should(BeTrue()) + Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue()) + }) + }) + }) + + Context("When a suite ends", func() { + BeforeEach(func() { + reporter.SpecSuiteDidEnd(suiteSummary) + }) + + It("should POST the SuiteSummary to the Ginkgo server", func() { + Ω(poster.posts).Should(HaveLen(1)) + Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/SpecSuiteDidEnd")) + Ω(poster.posts[0].bodyType).Should(Equal("application/json")) + + var summary *types.SuiteSummary + + err := json.Unmarshal(poster.posts[0].bodyContent, &summary) + Ω(err).ShouldNot(HaveOccurred()) + + Ω(summary).Should(Equal(suiteSummary)) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go new file mode 100644 index 00000000000..093f4513c0b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go @@ -0,0 +1,10 @@ +package remote + +/* +The OutputInterceptor is used by the ForwardingReporter to +intercept and capture all stdin and stderr output during a test run. +*/ +type OutputInterceptor interface { + StartInterceptingOutput() error + StopInterceptingAndReturnOutput() (string, error) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go new file mode 100644 index 00000000000..980065da575 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go @@ -0,0 +1,55 @@ +// +build freebsd openbsd netbsd dragonfly darwin linux solaris + +package remote + +import ( + "errors" + "io/ioutil" + "os" +) + +func NewOutputInterceptor() OutputInterceptor { + return &outputInterceptor{} +} + +type outputInterceptor struct { + redirectFile *os.File + intercepting bool +} + +func (interceptor *outputInterceptor) StartInterceptingOutput() error { + if interceptor.intercepting { + return errors.New("Already intercepting output!") + } + interceptor.intercepting = true + + var err error + + interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output") + if err != nil { + return err + } + + // Call a function in ./syscall_dup_*.go + // If building for everything other than linux_arm64, + // use a "normal" syscall.Dup2(oldfd, newfd) call. 
If building for linux_arm64 (which doesn't have syscall.Dup2) + // call syscall.Dup3(oldfd, newfd, 0). They are nearly identical, see: http://linux.die.net/man/2/dup3 + syscallDup(int(interceptor.redirectFile.Fd()), 1) + syscallDup(int(interceptor.redirectFile.Fd()), 2) + + return nil +} + +func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) { + if !interceptor.intercepting { + return "", errors.New("Not intercepting output!") + } + + interceptor.redirectFile.Close() + output, err := ioutil.ReadFile(interceptor.redirectFile.Name()) + os.Remove(interceptor.redirectFile.Name()) + + interceptor.intercepting = false + + return string(output), err +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go new file mode 100644 index 00000000000..c8f97d97f07 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go @@ -0,0 +1,33 @@ +// +build windows + +package remote + +import ( + "errors" +) + +func NewOutputInterceptor() OutputInterceptor { + return &outputInterceptor{} +} + +type outputInterceptor struct { + intercepting bool +} + +func (interceptor *outputInterceptor) StartInterceptingOutput() error { + if interceptor.intercepting { + return errors.New("Already intercepting output!") + } + interceptor.intercepting = true + + // not working on windows... + + return nil +} + +func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) { + // not working on windows... + interceptor.intercepting = false + + return "", nil +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/remote_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/remote_suite_test.go new file mode 100644 index 00000000000..e6b4e9f32ce --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/remote_suite_test.go @@ -0,0 +1,13 @@ +package remote_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestRemote(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Remote Spec Forwarding Suite") +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server.go b/vendor/github.com/onsi/ginkgo/internal/remote/server.go new file mode 100644 index 00000000000..297af2ebffc --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/server.go @@ -0,0 +1,224 @@ +/* + +The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. +This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser). + +*/ + +package remote + +import ( + "encoding/json" + "io/ioutil" + "net" + "net/http" + "sync" + + "github.com/onsi/ginkgo/internal/spec_iterator" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" +) + +/* +Server spins up on an automatically selected port and listens for communication from the forwarding reporter. +It then forwards that communication to attached reporters. 
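+
+Concretely, Start (below) registers two groups of handlers: streaming endpoints
+such as /SpecSuiteWillBegin and /SpecDidComplete, which are replayed to every
+reporter registered via RegisterReporters, and synchronization endpoints such as
+/BeforeSuiteState and /RemoteAfterSuiteData, which parallel nodes poll in order to
+coordinate SynchronizedBeforeSuite and SynchronizedAfterSuite.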
+*/ +type Server struct { + listener net.Listener + reporters []reporters.Reporter + alives []func() bool + lock *sync.Mutex + beforeSuiteData types.RemoteBeforeSuiteData + parallelTotal int + counter int +} + +//Create a new server, automatically selecting a port +func NewServer(parallelTotal int) (*Server, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + return &Server{ + listener: listener, + lock: &sync.Mutex{}, + alives: make([]func() bool, parallelTotal), + beforeSuiteData: types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending}, + parallelTotal: parallelTotal, + }, nil +} + +//Start the server. You don't need to `go s.Start()`, just `s.Start()` +func (server *Server) Start() { + httpServer := &http.Server{} + mux := http.NewServeMux() + httpServer.Handler = mux + + //streaming endpoints + mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin) + mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun) + mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun) + mux.HandleFunc("/SpecWillRun", server.specWillRun) + mux.HandleFunc("/SpecDidComplete", server.specDidComplete) + mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd) + + //synchronization endpoints + mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState) + mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData) + mux.HandleFunc("/counter", server.handleCounter) + mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility + + go httpServer.Serve(server.listener) +} + +//Stop the server +func (server *Server) Close() { + server.listener.Close() +} + +//The address the server can be reached it. Pass this into the `ForwardingReporter`. +func (server *Server) Address() string { + return "http://" + server.listener.Addr().String() +} + +// +// Streaming Endpoints +// + +//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` +func (server *Server) readAll(request *http.Request) []byte { + defer request.Body.Close() + body, _ := ioutil.ReadAll(request.Body) + return body +} + +func (server *Server) RegisterReporters(reporters ...reporters.Reporter) { + server.reporters = reporters +} + +func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + + var data struct { + Config config.GinkgoConfigType `json:"config"` + Summary *types.SuiteSummary `json:"suite-summary"` + } + + json.Unmarshal(body, &data) + + for _, reporter := range server.reporters { + reporter.SpecSuiteWillBegin(data.Config, data.Summary) + } +} + +func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var setupSummary *types.SetupSummary + json.Unmarshal(body, &setupSummary) + + for _, reporter := range server.reporters { + reporter.BeforeSuiteDidRun(setupSummary) + } +} + +func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var setupSummary *types.SetupSummary + json.Unmarshal(body, &setupSummary) + + for _, reporter := range server.reporters { + reporter.AfterSuiteDidRun(setupSummary) + } +} + +func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var specSummary *types.SpecSummary + json.Unmarshal(body, &specSummary) + + for _, reporter := range server.reporters { + 
reporter.SpecWillRun(specSummary) + } +} + +func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var specSummary *types.SpecSummary + json.Unmarshal(body, &specSummary) + + for _, reporter := range server.reporters { + reporter.SpecDidComplete(specSummary) + } +} + +func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var suiteSummary *types.SuiteSummary + json.Unmarshal(body, &suiteSummary) + + for _, reporter := range server.reporters { + reporter.SpecSuiteDidEnd(suiteSummary) + } +} + +// +// Synchronization Endpoints +// + +func (server *Server) RegisterAlive(node int, alive func() bool) { + server.lock.Lock() + defer server.lock.Unlock() + server.alives[node-1] = alive +} + +func (server *Server) nodeIsAlive(node int) bool { + server.lock.Lock() + defer server.lock.Unlock() + alive := server.alives[node-1] + if alive == nil { + return true + } + return alive() +} + +func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + if request.Method == "POST" { + dec := json.NewDecoder(request.Body) + dec.Decode(&(server.beforeSuiteData)) + } else { + beforeSuiteData := server.beforeSuiteData + if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) { + beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared + } + enc := json.NewEncoder(writer) + enc.Encode(beforeSuiteData) + } +} + +func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) { + afterSuiteData := types.RemoteAfterSuiteData{ + CanRun: true, + } + for i := 2; i <= server.parallelTotal; i++ { + afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i) + } + + enc := json.NewEncoder(writer) + enc.Encode(afterSuiteData) +} + +func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) { + c := spec_iterator.Counter{} + server.lock.Lock() + c.Index = server.counter + server.counter = server.counter + 1 + server.lock.Unlock() + + json.NewEncoder(writer).Encode(c) +} + +func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) { + writer.Write([]byte("")) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/server_test.go new file mode 100644 index 00000000000..eb2eefebe02 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/server_test.go @@ -0,0 +1,269 @@ +package remote_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/internal/remote" + . 
"github.com/onsi/gomega" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" + + "bytes" + "encoding/json" + "net/http" +) + +var _ = Describe("Server", func() { + var ( + server *Server + ) + + BeforeEach(func() { + var err error + server, err = NewServer(3) + Ω(err).ShouldNot(HaveOccurred()) + + server.Start() + }) + + AfterEach(func() { + server.Close() + }) + + Describe("Streaming endpoints", func() { + var ( + reporterA, reporterB *reporters.FakeReporter + forwardingReporter *ForwardingReporter + + suiteSummary *types.SuiteSummary + setupSummary *types.SetupSummary + specSummary *types.SpecSummary + ) + + BeforeEach(func() { + reporterA = reporters.NewFakeReporter() + reporterB = reporters.NewFakeReporter() + + server.RegisterReporters(reporterA, reporterB) + + forwardingReporter = NewForwardingReporter(server.Address(), &http.Client{}, &fakeOutputInterceptor{}) + + suiteSummary = &types.SuiteSummary{ + SuiteDescription: "My Test Suite", + } + + setupSummary = &types.SetupSummary{ + State: types.SpecStatePassed, + } + + specSummary = &types.SpecSummary{ + ComponentTexts: []string{"My", "Spec"}, + State: types.SpecStatePassed, + } + }) + + It("should make its address available", func() { + Ω(server.Address()).Should(MatchRegexp(`http://127.0.0.1:\d{2,}`)) + }) + + Describe("/SpecSuiteWillBegin", func() { + It("should decode and forward the Ginkgo config and suite summary", func(done Done) { + forwardingReporter.SpecSuiteWillBegin(config.GinkgoConfig, suiteSummary) + Ω(reporterA.Config).Should(Equal(config.GinkgoConfig)) + Ω(reporterB.Config).Should(Equal(config.GinkgoConfig)) + Ω(reporterA.BeginSummary).Should(Equal(suiteSummary)) + Ω(reporterB.BeginSummary).Should(Equal(suiteSummary)) + close(done) + }) + }) + + Describe("/BeforeSuiteDidRun", func() { + It("should decode and forward the setup summary", func() { + forwardingReporter.BeforeSuiteDidRun(setupSummary) + Ω(reporterA.BeforeSuiteSummary).Should(Equal(setupSummary)) + Ω(reporterB.BeforeSuiteSummary).Should(Equal(setupSummary)) + }) + }) + + Describe("/AfterSuiteDidRun", func() { + It("should decode and forward the setup summary", func() { + forwardingReporter.AfterSuiteDidRun(setupSummary) + Ω(reporterA.AfterSuiteSummary).Should(Equal(setupSummary)) + Ω(reporterB.AfterSuiteSummary).Should(Equal(setupSummary)) + }) + }) + + Describe("/SpecWillRun", func() { + It("should decode and forward the spec summary", func(done Done) { + forwardingReporter.SpecWillRun(specSummary) + Ω(reporterA.SpecWillRunSummaries[0]).Should(Equal(specSummary)) + Ω(reporterB.SpecWillRunSummaries[0]).Should(Equal(specSummary)) + close(done) + }) + }) + + Describe("/SpecDidComplete", func() { + It("should decode and forward the spec summary", func(done Done) { + forwardingReporter.SpecDidComplete(specSummary) + Ω(reporterA.SpecSummaries[0]).Should(Equal(specSummary)) + Ω(reporterB.SpecSummaries[0]).Should(Equal(specSummary)) + close(done) + }) + }) + + Describe("/SpecSuiteDidEnd", func() { + It("should decode and forward the suite summary", func(done Done) { + forwardingReporter.SpecSuiteDidEnd(suiteSummary) + Ω(reporterA.EndSummary).Should(Equal(suiteSummary)) + Ω(reporterB.EndSummary).Should(Equal(suiteSummary)) + close(done) + }) + }) + }) + + Describe("Synchronization endpoints", func() { + Describe("GETting and POSTing BeforeSuiteState", func() { + getBeforeSuite := func() types.RemoteBeforeSuiteData { + resp, err := http.Get(server.Address() + "/BeforeSuiteState") + 
Ω(err).ShouldNot(HaveOccurred()) + Ω(resp.StatusCode).Should(Equal(http.StatusOK)) + + r := types.RemoteBeforeSuiteData{} + decoder := json.NewDecoder(resp.Body) + err = decoder.Decode(&r) + Ω(err).ShouldNot(HaveOccurred()) + + return r + } + + postBeforeSuite := func(r types.RemoteBeforeSuiteData) { + resp, err := http.Post(server.Address()+"/BeforeSuiteState", "application/json", bytes.NewReader(r.ToJSON())) + Ω(err).ShouldNot(HaveOccurred()) + Ω(resp.StatusCode).Should(Equal(http.StatusOK)) + } + + Context("when the first node's Alive has not been registered yet", func() { + It("should return pending", func() { + state := getBeforeSuite() + Ω(state).Should(Equal(types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending})) + + state = getBeforeSuite() + Ω(state).Should(Equal(types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending})) + }) + }) + + Context("when the first node is Alive but has not responded yet", func() { + BeforeEach(func() { + server.RegisterAlive(1, func() bool { + return true + }) + }) + + It("should return pending", func() { + state := getBeforeSuite() + Ω(state).Should(Equal(types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending})) + + state = getBeforeSuite() + Ω(state).Should(Equal(types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending})) + }) + }) + + Context("when the first node has responded", func() { + var state types.RemoteBeforeSuiteData + BeforeEach(func() { + server.RegisterAlive(1, func() bool { + return false + }) + + state = types.RemoteBeforeSuiteData{ + Data: []byte("my data"), + State: types.RemoteBeforeSuiteStatePassed, + } + postBeforeSuite(state) + }) + + It("should return the passed in state", func() { + returnedState := getBeforeSuite() + Ω(returnedState).Should(Equal(state)) + }) + }) + + Context("when the first node is no longer Alive and has not responded yet", func() { + BeforeEach(func() { + server.RegisterAlive(1, func() bool { + return false + }) + }) + + It("should return disappeared", func() { + state := getBeforeSuite() + Ω(state).Should(Equal(types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStateDisappeared})) + + state = getBeforeSuite() + Ω(state).Should(Equal(types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStateDisappeared})) + }) + }) + }) + + Describe("GETting RemoteAfterSuiteData", func() { + getRemoteAfterSuiteData := func() bool { + resp, err := http.Get(server.Address() + "/RemoteAfterSuiteData") + Ω(err).ShouldNot(HaveOccurred()) + Ω(resp.StatusCode).Should(Equal(http.StatusOK)) + + a := types.RemoteAfterSuiteData{} + decoder := json.NewDecoder(resp.Body) + err = decoder.Decode(&a) + Ω(err).ShouldNot(HaveOccurred()) + + return a.CanRun + } + + Context("when there are unregistered nodes", func() { + BeforeEach(func() { + server.RegisterAlive(2, func() bool { + return false + }) + }) + + It("should return false", func() { + Ω(getRemoteAfterSuiteData()).Should(BeFalse()) + }) + }) + + Context("when all none-node-1 nodes are still running", func() { + BeforeEach(func() { + server.RegisterAlive(2, func() bool { + return true + }) + + server.RegisterAlive(3, func() bool { + return false + }) + }) + + It("should return false", func() { + Ω(getRemoteAfterSuiteData()).Should(BeFalse()) + }) + }) + + Context("when all none-1 nodes are done", func() { + BeforeEach(func() { + server.RegisterAlive(2, func() bool { + return false + }) + + server.RegisterAlive(3, func() bool { + return false + }) + }) + + It("should return true", func() { + 
Ω(getRemoteAfterSuiteData()).Should(BeTrue()) + }) + + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go new file mode 100644 index 00000000000..5c59728ea9b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go @@ -0,0 +1,11 @@ +// +build linux,arm64 + +package remote + +import "syscall" + +// linux_arm64 doesn't have syscall.Dup2 which ginkgo uses, so +// use the nearly identical syscall.Dup3 instead +func syscallDup(oldfd int, newfd int) (err error) { + return syscall.Dup3(oldfd, newfd, 0) +} \ No newline at end of file diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go new file mode 100644 index 00000000000..ecf9cafb664 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go @@ -0,0 +1,9 @@ +// +build solaris + +package remote + +import "golang.org/x/sys/unix" + +func syscallDup(oldfd int, newfd int) (err error) { + return unix.Dup2(oldfd, newfd) +} \ No newline at end of file diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go new file mode 100644 index 00000000000..cacdd0e6496 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go @@ -0,0 +1,11 @@ +// +build !linux !arm64 +// +build !windows +// +build !solaris + +package remote + +import "syscall" + +func syscallDup(oldfd int, newfd int) (err error) { + return syscall.Dup2(oldfd, newfd) +} \ No newline at end of file diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go new file mode 100644 index 00000000000..d32dec6997c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go @@ -0,0 +1,206 @@ +package spec + +import ( + "fmt" + "io" + "time" + + "github.com/onsi/ginkgo/internal/containernode" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/types" +) + +type Spec struct { + subject leafnodes.SubjectNode + focused bool + announceProgress bool + + containers []*containernode.ContainerNode + + state types.SpecState + runTime time.Duration + failure types.SpecFailure + previousFailures bool +} + +func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec { + spec := &Spec{ + subject: subject, + containers: containers, + focused: subject.Flag() == types.FlagTypeFocused, + announceProgress: announceProgress, + } + + spec.processFlag(subject.Flag()) + for i := len(containers) - 1; i >= 0; i-- { + spec.processFlag(containers[i].Flag()) + } + + return spec +} + +func (spec *Spec) processFlag(flag types.FlagType) { + if flag == types.FlagTypeFocused { + spec.focused = true + } else if flag == types.FlagTypePending { + spec.state = types.SpecStatePending + } +} + +func (spec *Spec) Skip() { + spec.state = types.SpecStateSkipped +} + +func (spec *Spec) Failed() bool { + return spec.state == types.SpecStateFailed || spec.state == types.SpecStatePanicked || spec.state == types.SpecStateTimedOut +} + +func (spec *Spec) Passed() bool { + return spec.state == types.SpecStatePassed +} + +func (spec *Spec) Flaked() bool { + return spec.state == types.SpecStatePassed && spec.previousFailures +} + +func (spec *Spec) Pending() bool { + return spec.state == types.SpecStatePending 
+} + +func (spec *Spec) Skipped() bool { + return spec.state == types.SpecStateSkipped +} + +func (spec *Spec) Focused() bool { + return spec.focused +} + +func (spec *Spec) IsMeasurement() bool { + return spec.subject.Type() == types.SpecComponentTypeMeasure +} + +func (spec *Spec) Summary(suiteID string) *types.SpecSummary { + componentTexts := make([]string, len(spec.containers)+1) + componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1) + + for i, container := range spec.containers { + componentTexts[i] = container.Text() + componentCodeLocations[i] = container.CodeLocation() + } + + componentTexts[len(spec.containers)] = spec.subject.Text() + componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation() + + return &types.SpecSummary{ + IsMeasurement: spec.IsMeasurement(), + NumberOfSamples: spec.subject.Samples(), + ComponentTexts: componentTexts, + ComponentCodeLocations: componentCodeLocations, + State: spec.state, + RunTime: spec.runTime, + Failure: spec.failure, + Measurements: spec.measurementsReport(), + SuiteID: suiteID, + } +} + +func (spec *Spec) ConcatenatedString() string { + s := "" + for _, container := range spec.containers { + s += container.Text() + " " + } + + return s + spec.subject.Text() +} + +func (spec *Spec) Run(writer io.Writer) { + if spec.state == types.SpecStateFailed { + spec.previousFailures = true + } + + startTime := time.Now() + defer func() { + spec.runTime = time.Since(startTime) + }() + + for sample := 0; sample < spec.subject.Samples(); sample++ { + spec.runSample(sample, writer) + + if spec.state != types.SpecStatePassed { + return + } + } +} + +func (spec *Spec) runSample(sample int, writer io.Writer) { + spec.state = types.SpecStatePassed + spec.failure = types.SpecFailure{} + innerMostContainerIndexToUnwind := -1 + + defer func() { + for i := innerMostContainerIndexToUnwind; i >= 0; i-- { + container := spec.containers[i] + for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) { + spec.announceSetupNode(writer, "AfterEach", container, afterEach) + afterEachState, afterEachFailure := afterEach.Run() + if afterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed { + spec.state = afterEachState + spec.failure = afterEachFailure + } + } + } + }() + + for i, container := range spec.containers { + innerMostContainerIndexToUnwind = i + for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) { + spec.announceSetupNode(writer, "BeforeEach", container, beforeEach) + spec.state, spec.failure = beforeEach.Run() + if spec.state != types.SpecStatePassed { + return + } + } + } + + for _, container := range spec.containers { + for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) { + spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach) + spec.state, spec.failure = justBeforeEach.Run() + if spec.state != types.SpecStatePassed { + return + } + } + } + + spec.announceSubject(writer, spec.subject) + spec.state, spec.failure = spec.subject.Run() +} + +func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) { + if spec.announceProgress { + s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, container.Text(), setupNode.CodeLocation().String()) + writer.Write([]byte(s)) + } +} + +func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) { + if spec.announceProgress { + nodeType := 
"" + switch subject.Type() { + case types.SpecComponentTypeIt: + nodeType = "It" + case types.SpecComponentTypeMeasure: + nodeType = "Measure" + } + s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, subject.Text(), subject.CodeLocation().String()) + writer.Write([]byte(s)) + } +} + +func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement { + if !spec.IsMeasurement() || spec.Failed() { + return map[string]*types.SpecMeasurement{} + } + + return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport() +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec_suite_test.go new file mode 100644 index 00000000000..8681a720684 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec/spec_suite_test.go @@ -0,0 +1,13 @@ +package spec_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestSpec(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Spec Suite") +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec_test.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec_test.go new file mode 100644 index 00000000000..3bab8887cd2 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec/spec_test.go @@ -0,0 +1,657 @@ +package spec_test + +import ( + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/gbytes" + + . "github.com/onsi/ginkgo/internal/spec" + + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/containernode" + Failer "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/types" +) + +var noneFlag = types.FlagTypeNone +var focusedFlag = types.FlagTypeFocused +var pendingFlag = types.FlagTypePending + +var _ = Describe("Spec", func() { + var ( + failer *Failer.Failer + codeLocation types.CodeLocation + nodesThatRan []string + spec *Spec + buffer *gbytes.Buffer + ) + + newBody := func(text string, fail bool) func() { + return func() { + nodesThatRan = append(nodesThatRan, text) + if fail { + failer.Fail(text, codeLocation) + } + } + } + + newIt := func(text string, flag types.FlagType, fail bool) *leafnodes.ItNode { + return leafnodes.NewItNode(text, newBody(text, fail), flag, codeLocation, 0, failer, 0) + } + + newItWithBody := func(text string, body interface{}) *leafnodes.ItNode { + return leafnodes.NewItNode(text, body, noneFlag, codeLocation, 0, failer, 0) + } + + newMeasure := func(text string, flag types.FlagType, fail bool, samples int) *leafnodes.MeasureNode { + return leafnodes.NewMeasureNode(text, func(Benchmarker) { + nodesThatRan = append(nodesThatRan, text) + if fail { + failer.Fail(text, codeLocation) + } + }, flag, codeLocation, samples, failer, 0) + } + + newBef := func(text string, fail bool) leafnodes.BasicNode { + return leafnodes.NewBeforeEachNode(newBody(text, fail), codeLocation, 0, failer, 0) + } + + newAft := func(text string, fail bool) leafnodes.BasicNode { + return leafnodes.NewAfterEachNode(newBody(text, fail), codeLocation, 0, failer, 0) + } + + newJusBef := func(text string, fail bool) leafnodes.BasicNode { + return leafnodes.NewJustBeforeEachNode(newBody(text, fail), codeLocation, 0, failer, 0) + } + + newContainer := func(text string, flag types.FlagType, setupNodes ...leafnodes.BasicNode) *containernode.ContainerNode { + c := containernode.New(text, flag, codeLocation) + for _, node := range setupNodes { + c.PushSetupNode(node) + } + return c + } + + containers := 
func(containers ...*containernode.ContainerNode) []*containernode.ContainerNode { + return containers + } + + BeforeEach(func() { + buffer = gbytes.NewBuffer() + failer = Failer.New() + codeLocation = codelocation.New(0) + nodesThatRan = []string{} + }) + + Describe("marking specs focused and pending", func() { + It("should satisfy various caes", func() { + cases := []struct { + ContainerFlags []types.FlagType + SubjectFlag types.FlagType + Pending bool + Focused bool + }{ + {[]types.FlagType{}, noneFlag, false, false}, + {[]types.FlagType{}, focusedFlag, false, true}, + {[]types.FlagType{}, pendingFlag, true, false}, + {[]types.FlagType{noneFlag}, noneFlag, false, false}, + {[]types.FlagType{focusedFlag}, noneFlag, false, true}, + {[]types.FlagType{pendingFlag}, noneFlag, true, false}, + {[]types.FlagType{noneFlag}, focusedFlag, false, true}, + {[]types.FlagType{focusedFlag}, focusedFlag, false, true}, + {[]types.FlagType{pendingFlag}, focusedFlag, true, true}, + {[]types.FlagType{noneFlag}, pendingFlag, true, false}, + {[]types.FlagType{focusedFlag}, pendingFlag, true, true}, + {[]types.FlagType{pendingFlag}, pendingFlag, true, false}, + {[]types.FlagType{focusedFlag, noneFlag}, noneFlag, false, true}, + {[]types.FlagType{noneFlag, focusedFlag}, noneFlag, false, true}, + {[]types.FlagType{pendingFlag, noneFlag}, noneFlag, true, false}, + {[]types.FlagType{noneFlag, pendingFlag}, noneFlag, true, false}, + {[]types.FlagType{focusedFlag, pendingFlag}, noneFlag, true, true}, + } + + for i, c := range cases { + subject := newIt("it node", c.SubjectFlag, false) + containers := []*containernode.ContainerNode{} + for _, flag := range c.ContainerFlags { + containers = append(containers, newContainer("container", flag)) + } + + spec := New(subject, containers, false) + Ω(spec.Pending()).Should(Equal(c.Pending), "Case %d: %#v", i, c) + Ω(spec.Focused()).Should(Equal(c.Focused), "Case %d: %#v", i, c) + + if c.Pending { + Ω(spec.Summary("").State).Should(Equal(types.SpecStatePending)) + } + } + }) + }) + + Describe("Skip", func() { + It("should be skipped", func() { + spec := New(newIt("it node", noneFlag, false), containers(newContainer("container", noneFlag)), false) + Ω(spec.Skipped()).Should(BeFalse()) + spec.Skip() + Ω(spec.Skipped()).Should(BeTrue()) + Ω(spec.Summary("").State).Should(Equal(types.SpecStateSkipped)) + }) + }) + + Describe("IsMeasurement", func() { + It("should be true if the subject is a measurement node", func() { + spec := New(newIt("it node", noneFlag, false), containers(newContainer("container", noneFlag)), false) + Ω(spec.IsMeasurement()).Should(BeFalse()) + Ω(spec.Summary("").IsMeasurement).Should(BeFalse()) + Ω(spec.Summary("").NumberOfSamples).Should(Equal(1)) + + spec = New(newMeasure("measure node", noneFlag, false, 10), containers(newContainer("container", noneFlag)), false) + Ω(spec.IsMeasurement()).Should(BeTrue()) + Ω(spec.Summary("").IsMeasurement).Should(BeTrue()) + Ω(spec.Summary("").NumberOfSamples).Should(Equal(10)) + }) + }) + + Describe("Passed", func() { + It("should pass when the subject passed", func() { + spec := New(newIt("it node", noneFlag, false), containers(), false) + spec.Run(buffer) + + Ω(spec.Passed()).Should(BeTrue()) + Ω(spec.Failed()).Should(BeFalse()) + Ω(spec.Summary("").State).Should(Equal(types.SpecStatePassed)) + Ω(spec.Summary("").Failure).Should(BeZero()) + }) + }) + + Describe("Flaked", func() { + It("should work if Run is called twice and gets different results", func() { + i := 0 + spec := New(newItWithBody("flaky it", func() { + 
i++ + if i == 1 { + failer.Fail("oops", codeLocation) + } + }), containers(), false) + spec.Run(buffer) + Ω(spec.Passed()).Should(BeFalse()) + Ω(spec.Failed()).Should(BeTrue()) + Ω(spec.Flaked()).Should(BeFalse()) + Ω(spec.Summary("").State).Should(Equal(types.SpecStateFailed)) + Ω(spec.Summary("").Failure.Message).Should(Equal("oops")) + spec.Run(buffer) + Ω(spec.Passed()).Should(BeTrue()) + Ω(spec.Failed()).Should(BeFalse()) + Ω(spec.Flaked()).Should(BeTrue()) + Ω(spec.Summary("").State).Should(Equal(types.SpecStatePassed)) + }) + }) + + Describe("Failed", func() { + It("should be failed if the failure was panic", func() { + spec := New(newItWithBody("panicky it", func() { + panic("bam") + }), containers(), false) + spec.Run(buffer) + Ω(spec.Passed()).Should(BeFalse()) + Ω(spec.Failed()).Should(BeTrue()) + Ω(spec.Summary("").State).Should(Equal(types.SpecStatePanicked)) + Ω(spec.Summary("").Failure.Message).Should(Equal("Test Panicked")) + Ω(spec.Summary("").Failure.ForwardedPanic).Should(Equal("bam")) + }) + + It("should be failed if the failure was a timeout", func() { + spec := New(newItWithBody("sleepy it", func(done Done) {}), containers(), false) + spec.Run(buffer) + Ω(spec.Passed()).Should(BeFalse()) + Ω(spec.Failed()).Should(BeTrue()) + Ω(spec.Summary("").State).Should(Equal(types.SpecStateTimedOut)) + Ω(spec.Summary("").Failure.Message).Should(Equal("Timed out")) + }) + + It("should be failed if the failure was... a failure", func() { + spec := New(newItWithBody("failing it", func() { + failer.Fail("bam", codeLocation) + }), containers(), false) + spec.Run(buffer) + Ω(spec.Passed()).Should(BeFalse()) + Ω(spec.Failed()).Should(BeTrue()) + Ω(spec.Summary("").State).Should(Equal(types.SpecStateFailed)) + Ω(spec.Summary("").Failure.Message).Should(Equal("bam")) + }) + }) + + Describe("Concatenated string", func() { + It("should concatenate the texts of the containers and the subject", func() { + spec := New( + newIt("it node", noneFlag, false), + containers( + newContainer("outer container", noneFlag), + newContainer("inner container", noneFlag), + ), + false, + ) + + Ω(spec.ConcatenatedString()).Should(Equal("outer container inner container it node")) + }) + }) + + Describe("running it specs", func() { + Context("with just an it", func() { + Context("that succeeds", func() { + It("should run the it and report on its success", func() { + spec := New(newIt("it node", noneFlag, false), containers(), false) + spec.Run(buffer) + Ω(spec.Passed()).Should(BeTrue()) + Ω(spec.Failed()).Should(BeFalse()) + Ω(nodesThatRan).Should(Equal([]string{"it node"})) + }) + }) + + Context("that fails", func() { + It("should run the it and report on its success", func() { + spec := New(newIt("it node", noneFlag, true), containers(), false) + spec.Run(buffer) + Ω(spec.Passed()).Should(BeFalse()) + Ω(spec.Failed()).Should(BeTrue()) + Ω(spec.Summary("").Failure.Message).Should(Equal("it node")) + Ω(nodesThatRan).Should(Equal([]string{"it node"})) + }) + }) + }) + + Context("with a full set of setup nodes", func() { + var failingNodes map[string]bool + + BeforeEach(func() { + failingNodes = map[string]bool{} + }) + + JustBeforeEach(func() { + spec = New( + newIt("it node", noneFlag, failingNodes["it node"]), + containers( + newContainer("outer container", noneFlag, + newBef("outer bef A", failingNodes["outer bef A"]), + newBef("outer bef B", failingNodes["outer bef B"]), + newJusBef("outer jusbef A", failingNodes["outer jusbef A"]), + newJusBef("outer jusbef B", failingNodes["outer jusbef B"]), + 
newAft("outer aft A", failingNodes["outer aft A"]), + newAft("outer aft B", failingNodes["outer aft B"]), + ), + newContainer("inner container", noneFlag, + newBef("inner bef A", failingNodes["inner bef A"]), + newBef("inner bef B", failingNodes["inner bef B"]), + newJusBef("inner jusbef A", failingNodes["inner jusbef A"]), + newJusBef("inner jusbef B", failingNodes["inner jusbef B"]), + newAft("inner aft A", failingNodes["inner aft A"]), + newAft("inner aft B", failingNodes["inner aft B"]), + ), + ), + false, + ) + spec.Run(buffer) + }) + + Context("that all pass", func() { + It("should walk through the nodes in the correct order", func() { + Ω(spec.Passed()).Should(BeTrue()) + Ω(spec.Failed()).Should(BeFalse()) + Ω(nodesThatRan).Should(Equal([]string{ + "outer bef A", + "outer bef B", + "inner bef A", + "inner bef B", + "outer jusbef A", + "outer jusbef B", + "inner jusbef A", + "inner jusbef B", + "it node", + "inner aft A", + "inner aft B", + "outer aft A", + "outer aft B", + })) + }) + }) + + Context("when the subject fails", func() { + BeforeEach(func() { + failingNodes["it node"] = true + }) + + It("should run the afters", func() { + Ω(spec.Passed()).Should(BeFalse()) + Ω(spec.Failed()).Should(BeTrue()) + Ω(nodesThatRan).Should(Equal([]string{ + "outer bef A", + "outer bef B", + "inner bef A", + "inner bef B", + "outer jusbef A", + "outer jusbef B", + "inner jusbef A", + "inner jusbef B", + "it node", + "inner aft A", + "inner aft B", + "outer aft A", + "outer aft B", + })) + Ω(spec.Summary("").Failure.Message).Should(Equal("it node")) + }) + }) + + Context("when an inner before fails", func() { + BeforeEach(func() { + failingNodes["inner bef A"] = true + }) + + It("should not run any other befores, but it should run the subsequent afters", func() { + Ω(spec.Passed()).Should(BeFalse()) + Ω(spec.Failed()).Should(BeTrue()) + Ω(nodesThatRan).Should(Equal([]string{ + "outer bef A", + "outer bef B", + "inner bef A", + "inner aft A", + "inner aft B", + "outer aft A", + "outer aft B", + })) + Ω(spec.Summary("").Failure.Message).Should(Equal("inner bef A")) + }) + }) + + Context("when an outer before fails", func() { + BeforeEach(func() { + failingNodes["outer bef B"] = true + }) + + It("should not run any other befores, but it should run the subsequent afters", func() { + Ω(spec.Passed()).Should(BeFalse()) + Ω(spec.Failed()).Should(BeTrue()) + Ω(nodesThatRan).Should(Equal([]string{ + "outer bef A", + "outer bef B", + "outer aft A", + "outer aft B", + })) + Ω(spec.Summary("").Failure.Message).Should(Equal("outer bef B")) + }) + }) + + Context("when an after fails", func() { + BeforeEach(func() { + failingNodes["inner aft B"] = true + }) + + It("should run all other afters, but mark the test as failed", func() { + Ω(spec.Passed()).Should(BeFalse()) + Ω(spec.Failed()).Should(BeTrue()) + Ω(nodesThatRan).Should(Equal([]string{ + "outer bef A", + "outer bef B", + "inner bef A", + "inner bef B", + "outer jusbef A", + "outer jusbef B", + "inner jusbef A", + "inner jusbef B", + "it node", + "inner aft A", + "inner aft B", + "outer aft A", + "outer aft B", + })) + Ω(spec.Summary("").Failure.Message).Should(Equal("inner aft B")) + }) + }) + + Context("when a just before each fails", func() { + BeforeEach(func() { + failingNodes["outer jusbef B"] = true + }) + + It("should run the afters, but not the subject", func() { + Ω(spec.Passed()).Should(BeFalse()) + Ω(spec.Failed()).Should(BeTrue()) + Ω(nodesThatRan).Should(Equal([]string{ + "outer bef A", + "outer bef B", + "inner bef A", + "inner bef B", + 
"outer jusbef A", + "outer jusbef B", + "inner aft A", + "inner aft B", + "outer aft A", + "outer aft B", + })) + Ω(spec.Summary("").Failure.Message).Should(Equal("outer jusbef B")) + }) + }) + + Context("when an after fails after an earlier node has failed", func() { + BeforeEach(func() { + failingNodes["it node"] = true + failingNodes["inner aft B"] = true + }) + + It("should record the earlier failure", func() { + Ω(spec.Passed()).Should(BeFalse()) + Ω(spec.Failed()).Should(BeTrue()) + Ω(nodesThatRan).Should(Equal([]string{ + "outer bef A", + "outer bef B", + "inner bef A", + "inner bef B", + "outer jusbef A", + "outer jusbef B", + "inner jusbef A", + "inner jusbef B", + "it node", + "inner aft A", + "inner aft B", + "outer aft A", + "outer aft B", + })) + Ω(spec.Summary("").Failure.Message).Should(Equal("it node")) + }) + }) + }) + }) + + Describe("running measurement specs", func() { + Context("when the measurement succeeds", func() { + It("should run N samples", func() { + spec = New( + newMeasure("measure node", noneFlag, false, 3), + containers( + newContainer("container", noneFlag, + newBef("bef A", false), + newJusBef("jusbef A", false), + newAft("aft A", false), + ), + ), + false, + ) + spec.Run(buffer) + + Ω(spec.Passed()).Should(BeTrue()) + Ω(spec.Failed()).Should(BeFalse()) + Ω(nodesThatRan).Should(Equal([]string{ + "bef A", + "jusbef A", + "measure node", + "aft A", + "bef A", + "jusbef A", + "measure node", + "aft A", + "bef A", + "jusbef A", + "measure node", + "aft A", + })) + }) + }) + + Context("when the measurement fails", func() { + It("should bail after the failure occurs", func() { + spec = New( + newMeasure("measure node", noneFlag, true, 3), + containers( + newContainer("container", noneFlag, + newBef("bef A", false), + newJusBef("jusbef A", false), + newAft("aft A", false), + ), + ), + false, + ) + spec.Run(buffer) + + Ω(spec.Passed()).Should(BeFalse()) + Ω(spec.Failed()).Should(BeTrue()) + Ω(nodesThatRan).Should(Equal([]string{ + "bef A", + "jusbef A", + "measure node", + "aft A", + })) + }) + }) + }) + + Describe("Summary", func() { + var ( + subjectCodeLocation types.CodeLocation + outerContainerCodeLocation types.CodeLocation + innerContainerCodeLocation types.CodeLocation + summary *types.SpecSummary + ) + + BeforeEach(func() { + subjectCodeLocation = codelocation.New(0) + outerContainerCodeLocation = codelocation.New(0) + innerContainerCodeLocation = codelocation.New(0) + + spec = New( + leafnodes.NewItNode("it node", func() { + time.Sleep(10 * time.Millisecond) + }, noneFlag, subjectCodeLocation, 0, failer, 0), + containers( + containernode.New("outer container", noneFlag, outerContainerCodeLocation), + containernode.New("inner container", noneFlag, innerContainerCodeLocation), + ), + false, + ) + + spec.Run(buffer) + Ω(spec.Passed()).Should(BeTrue()) + summary = spec.Summary("suite id") + }) + + It("should have the suite id", func() { + Ω(summary.SuiteID).Should(Equal("suite id")) + }) + + It("should have the component texts and code locations", func() { + Ω(summary.ComponentTexts).Should(Equal([]string{"outer container", "inner container", "it node"})) + Ω(summary.ComponentCodeLocations).Should(Equal([]types.CodeLocation{outerContainerCodeLocation, innerContainerCodeLocation, subjectCodeLocation})) + }) + + It("should have a runtime", func() { + Ω(summary.RunTime).Should(BeNumerically(">=", 10*time.Millisecond)) + }) + + It("should not be a measurement, or have a measurement summary", func() { + Ω(summary.IsMeasurement).Should(BeFalse()) + 
Ω(summary.Measurements).Should(BeEmpty()) + }) + }) + + Describe("Summaries for measurements", func() { + var summary *types.SpecSummary + + BeforeEach(func() { + spec = New(leafnodes.NewMeasureNode("measure node", func(b Benchmarker) { + b.RecordValue("a value", 7, "some info") + b.RecordValueWithPrecision("another value", 8, "ns", 5, "more info") + }, noneFlag, codeLocation, 4, failer, 0), containers(), false) + spec.Run(buffer) + Ω(spec.Passed()).Should(BeTrue()) + summary = spec.Summary("suite id") + }) + + It("should include the number of samples", func() { + Ω(summary.NumberOfSamples).Should(Equal(4)) + }) + + It("should be a measurement", func() { + Ω(summary.IsMeasurement).Should(BeTrue()) + }) + + It("should have the measurements report", func() { + Ω(summary.Measurements).Should(HaveKey("a value")) + report := summary.Measurements["a value"] + Ω(report.Name).Should(Equal("a value")) + Ω(report.Info).Should(Equal("some info")) + Ω(report.Results).Should(Equal([]float64{7, 7, 7, 7})) + + Ω(summary.Measurements).Should(HaveKey("another value")) + report = summary.Measurements["another value"] + Ω(report.Name).Should(Equal("another value")) + Ω(report.Info).Should(Equal("more info")) + Ω(report.Results).Should(Equal([]float64{8, 8, 8, 8})) + Ω(report.Units).Should(Equal("ns")) + Ω(report.Precision).Should(Equal(5)) + }) + }) + + Describe("When told to emit progress", func() { + It("should emit progress to the writer as it runs Befores, JustBefores, Afters, and Its", func() { + spec = New( + newIt("it node", noneFlag, false), + containers( + newContainer("outer container", noneFlag, + newBef("outer bef A", false), + newJusBef("outer jusbef A", false), + newAft("outer aft A", false), + ), + newContainer("inner container", noneFlag, + newBef("inner bef A", false), + newJusBef("inner jusbef A", false), + newAft("inner aft A", false), + ), + ), + true, + ) + spec.Run(buffer) + + Ω(buffer).Should(gbytes.Say(`\[BeforeEach\] outer container`)) + Ω(buffer).Should(gbytes.Say(`\[BeforeEach\] inner container`)) + Ω(buffer).Should(gbytes.Say(`\[JustBeforeEach\] outer container`)) + Ω(buffer).Should(gbytes.Say(`\[JustBeforeEach\] inner container`)) + Ω(buffer).Should(gbytes.Say(`\[It\] it node`)) + Ω(buffer).Should(gbytes.Say(`\[AfterEach\] inner container`)) + Ω(buffer).Should(gbytes.Say(`\[AfterEach\] outer container`)) + }) + + It("should emit progress to the writer as it runs Befores, JustBefores, Afters, and Measures", func() { + spec = New( + newMeasure("measure node", noneFlag, false, 2), + containers(), + true, + ) + spec.Run(buffer) + + Ω(buffer).Should(gbytes.Say(`\[Measure\] measure node`)) + Ω(buffer).Should(gbytes.Say(`\[Measure\] measure node`)) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go new file mode 100644 index 00000000000..006185ab5d8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go @@ -0,0 +1,123 @@ +package spec + +import ( + "math/rand" + "regexp" + "sort" +) + +type Specs struct { + specs []*Spec + hasProgrammaticFocus bool + RegexScansFilePath bool +} + +func NewSpecs(specs []*Spec) *Specs { + return &Specs{ + specs: specs, + } +} + +func (e *Specs) Specs() []*Spec { + return e.specs +} + +func (e *Specs) HasProgrammaticFocus() bool { + return e.hasProgrammaticFocus +} + +func (e *Specs) Shuffle(r *rand.Rand) { + sort.Sort(e) + permutation := r.Perm(len(e.specs)) + shuffledSpecs := make([]*Spec, len(e.specs)) + for i, j := range permutation { + shuffledSpecs[i] 
= e.specs[j] + } + e.specs = shuffledSpecs +} + +func (e *Specs) ApplyFocus(description string, focusString string, skipString string) { + if focusString == "" && skipString == "" { + e.applyProgrammaticFocus() + } else { + e.applyRegExpFocusAndSkip(description, focusString, skipString) + } +} + +func (e *Specs) applyProgrammaticFocus() { + e.hasProgrammaticFocus = false + for _, spec := range e.specs { + if spec.Focused() && !spec.Pending() { + e.hasProgrammaticFocus = true + break + } + } + + if e.hasProgrammaticFocus { + for _, spec := range e.specs { + if !spec.Focused() { + spec.Skip() + } + } + } +} + +// toMatch returns a byte[] to be used by regex matchers. When adding new behaviours to the matching function, +// this is the place which we append to. +func (e *Specs) toMatch(description string, spec *Spec) []byte { + if e.RegexScansFilePath { + return []byte( + description + " " + + spec.ConcatenatedString() + " " + + spec.subject.CodeLocation().FileName) + } else { + return []byte( + description + " " + + spec.ConcatenatedString()) + } +} + +func (e *Specs) applyRegExpFocusAndSkip(description string, focusString string, skipString string) { + for _, spec := range e.specs { + matchesFocus := true + matchesSkip := false + + toMatch := e.toMatch(description, spec) + + if focusString != "" { + focusFilter := regexp.MustCompile(focusString) + matchesFocus = focusFilter.Match([]byte(toMatch)) + } + + if skipString != "" { + skipFilter := regexp.MustCompile(skipString) + matchesSkip = skipFilter.Match([]byte(toMatch)) + } + + if !matchesFocus || matchesSkip { + spec.Skip() + } + } +} + +func (e *Specs) SkipMeasurements() { + for _, spec := range e.specs { + if spec.IsMeasurement() { + spec.Skip() + } + } +} + +//sort.Interface + +func (e *Specs) Len() int { + return len(e.specs) +} + +func (e *Specs) Less(i, j int) bool { + return e.specs[i].ConcatenatedString() < e.specs[j].ConcatenatedString() +} + +func (e *Specs) Swap(i, j int) { + e.specs[i], e.specs[j] = e.specs[j], e.specs[i] +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs_test.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs_test.go new file mode 100644 index 00000000000..066fbbb3ae0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec/specs_test.go @@ -0,0 +1,287 @@ +package spec_test + +import ( + "math/rand" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/internal/spec" + . 
"github.com/onsi/gomega" + + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/containernode" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/types" +) + +var _ = Describe("Specs", func() { + var specs *Specs + + newSpec := func(text string, flag types.FlagType) *Spec { + subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0) + return New(subject, []*containernode.ContainerNode{}, false) + } + + newMeasureSpec := func(text string, flag types.FlagType) *Spec { + subject := leafnodes.NewMeasureNode(text, func(Benchmarker) {}, flag, codelocation.New(0), 0, nil, 0) + return New(subject, []*containernode.ContainerNode{}, false) + } + + newSpecs := func(args ...interface{}) *Specs { + specs := []*Spec{} + for index := 0; index < len(args)-1; index += 2 { + specs = append(specs, newSpec(args[index].(string), args[index+1].(types.FlagType))) + } + return NewSpecs(specs) + } + + specTexts := func(specs *Specs) []string { + texts := []string{} + for _, spec := range specs.Specs() { + texts = append(texts, spec.ConcatenatedString()) + } + return texts + } + + willRunTexts := func(specs *Specs) []string { + texts := []string{} + for _, spec := range specs.Specs() { + if !(spec.Skipped() || spec.Pending()) { + texts = append(texts, spec.ConcatenatedString()) + } + } + return texts + } + + skippedTexts := func(specs *Specs) []string { + texts := []string{} + for _, spec := range specs.Specs() { + if spec.Skipped() { + texts = append(texts, spec.ConcatenatedString()) + } + } + return texts + } + + pendingTexts := func(specs *Specs) []string { + texts := []string{} + for _, spec := range specs.Specs() { + if spec.Pending() { + texts = append(texts, spec.ConcatenatedString()) + } + } + return texts + } + + Describe("Shuffling specs", func() { + It("should shuffle the specs using the passed in randomizer", func() { + specs17 := newSpecs("C", noneFlag, "A", noneFlag, "B", noneFlag) + specs17.Shuffle(rand.New(rand.NewSource(17))) + texts17 := specTexts(specs17) + + specs17Again := newSpecs("C", noneFlag, "A", noneFlag, "B", noneFlag) + specs17Again.Shuffle(rand.New(rand.NewSource(17))) + texts17Again := specTexts(specs17Again) + + specs15 := newSpecs("C", noneFlag, "A", noneFlag, "B", noneFlag) + specs15.Shuffle(rand.New(rand.NewSource(15))) + texts15 := specTexts(specs15) + + specsUnshuffled := newSpecs("C", noneFlag, "A", noneFlag, "B", noneFlag) + textsUnshuffled := specTexts(specsUnshuffled) + + Ω(textsUnshuffled).Should(Equal([]string{"C", "A", "B"})) + + Ω(texts17).Should(Equal(texts17Again)) + Ω(texts17).ShouldNot(Equal(texts15)) + Ω(texts17).ShouldNot(Equal(textsUnshuffled)) + Ω(texts15).ShouldNot(Equal(textsUnshuffled)) + + Ω(texts17).Should(HaveLen(3)) + Ω(texts17).Should(ContainElement("A")) + Ω(texts17).Should(ContainElement("B")) + Ω(texts17).Should(ContainElement("C")) + + Ω(texts15).Should(HaveLen(3)) + Ω(texts15).Should(ContainElement("A")) + Ω(texts15).Should(ContainElement("B")) + Ω(texts15).Should(ContainElement("C")) + }) + }) + + Describe("with no programmatic focus", func() { + BeforeEach(func() { + specs = newSpecs("A1", noneFlag, "A2", noneFlag, "B1", noneFlag, "B2", pendingFlag) + specs.ApplyFocus("", "", "") + }) + + It("should not report as having programmatic specs", func() { + Ω(specs.HasProgrammaticFocus()).Should(BeFalse()) + }) + }) + + Describe("Applying focus/skip", func() { + var description, focusString, skipString string + + BeforeEach(func() { + description, focusString, skipString = 
"", "", "" + }) + + JustBeforeEach(func() { + specs = newSpecs("A1", focusedFlag, "A2", noneFlag, "B1", focusedFlag, "B2", pendingFlag) + specs.ApplyFocus(description, focusString, skipString) + }) + + Context("with neither a focus string nor a skip string", func() { + It("should apply the programmatic focus", func() { + Ω(willRunTexts(specs)).Should(Equal([]string{"A1", "B1"})) + Ω(skippedTexts(specs)).Should(Equal([]string{"A2", "B2"})) + Ω(pendingTexts(specs)).Should(BeEmpty()) + }) + + It("should report as having programmatic specs", func() { + Ω(specs.HasProgrammaticFocus()).Should(BeTrue()) + }) + }) + + Context("with a focus regexp", func() { + BeforeEach(func() { + focusString = "A" + }) + + It("should override the programmatic focus", func() { + Ω(willRunTexts(specs)).Should(Equal([]string{"A1", "A2"})) + Ω(skippedTexts(specs)).Should(Equal([]string{"B1", "B2"})) + Ω(pendingTexts(specs)).Should(BeEmpty()) + }) + + It("should not report as having programmatic specs", func() { + Ω(specs.HasProgrammaticFocus()).Should(BeFalse()) + }) + }) + + Context("with a focus regexp", func() { + BeforeEach(func() { + focusString = "B" + }) + + It("should not override any pendings", func() { + Ω(willRunTexts(specs)).Should(Equal([]string{"B1"})) + Ω(skippedTexts(specs)).Should(Equal([]string{"A1", "A2"})) + Ω(pendingTexts(specs)).Should(Equal([]string{"B2"})) + }) + }) + + Context("with a description", func() { + BeforeEach(func() { + description = "C" + focusString = "C" + }) + + It("should include the description in the focus determination", func() { + Ω(willRunTexts(specs)).Should(Equal([]string{"A1", "A2", "B1"})) + Ω(skippedTexts(specs)).Should(BeEmpty()) + Ω(pendingTexts(specs)).Should(Equal([]string{"B2"})) + }) + }) + + Context("with a description", func() { + BeforeEach(func() { + description = "C" + skipString = "C" + }) + + It("should include the description in the focus determination", func() { + Ω(willRunTexts(specs)).Should(BeEmpty()) + Ω(skippedTexts(specs)).Should(Equal([]string{"A1", "A2", "B1", "B2"})) + Ω(pendingTexts(specs)).Should(BeEmpty()) + }) + }) + + Context("with a skip regexp", func() { + BeforeEach(func() { + skipString = "A" + }) + + It("should override the programmatic focus", func() { + Ω(willRunTexts(specs)).Should(Equal([]string{"B1"})) + Ω(skippedTexts(specs)).Should(Equal([]string{"A1", "A2"})) + Ω(pendingTexts(specs)).Should(Equal([]string{"B2"})) + }) + + It("should not report as having programmatic specs", func() { + Ω(specs.HasProgrammaticFocus()).Should(BeFalse()) + }) + }) + + Context("with both a focus and a skip regexp", func() { + BeforeEach(func() { + focusString = "1" + skipString = "B" + }) + + It("should AND the two", func() { + Ω(willRunTexts(specs)).Should(Equal([]string{"A1"})) + Ω(skippedTexts(specs)).Should(Equal([]string{"A2", "B1", "B2"})) + Ω(pendingTexts(specs)).Should(BeEmpty()) + }) + + It("should not report as having programmatic specs", func() { + Ω(specs.HasProgrammaticFocus()).Should(BeFalse()) + }) + }) + }) + + Describe("With a focused spec within a pending context and a pending spec within a focused context", func() { + BeforeEach(func() { + pendingInFocused := New( + leafnodes.NewItNode("PendingInFocused", func() {}, pendingFlag, codelocation.New(0), 0, nil, 0), + []*containernode.ContainerNode{ + containernode.New("", focusedFlag, codelocation.New(0)), + }, false) + + focusedInPending := New( + leafnodes.NewItNode("FocusedInPending", func() {}, focusedFlag, codelocation.New(0), 0, nil, 0), + []*containernode.ContainerNode{ + 
containernode.New("", pendingFlag, codelocation.New(0)), + }, false) + + specs = NewSpecs([]*Spec{ + newSpec("A", noneFlag), + newSpec("B", noneFlag), + pendingInFocused, + focusedInPending, + }) + specs.ApplyFocus("", "", "") + }) + + It("should not have a programmatic focus and should run all tests", func() { + Ω(willRunTexts(specs)).Should(Equal([]string{"A", "B"})) + Ω(skippedTexts(specs)).Should(BeEmpty()) + Ω(pendingTexts(specs)).Should(ConsistOf(ContainSubstring("PendingInFocused"), ContainSubstring("FocusedInPending"))) + }) + }) + + Describe("skipping measurements", func() { + BeforeEach(func() { + specs = NewSpecs([]*Spec{ + newSpec("A", noneFlag), + newSpec("B", noneFlag), + newSpec("C", pendingFlag), + newMeasureSpec("measurementA", noneFlag), + newMeasureSpec("measurementB", pendingFlag), + }) + }) + + It("should skip measurements", func() { + Ω(willRunTexts(specs)).Should(Equal([]string{"A", "B", "measurementA"})) + Ω(skippedTexts(specs)).Should(BeEmpty()) + Ω(pendingTexts(specs)).Should(Equal([]string{"C", "measurementB"})) + + specs.SkipMeasurements() + + Ω(willRunTexts(specs)).Should(Equal([]string{"A", "B"})) + Ω(skippedTexts(specs)).Should(Equal([]string{"measurementA", "measurementB"})) + Ω(pendingTexts(specs)).Should(Equal([]string{"C"})) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go new file mode 100644 index 00000000000..82272554aa8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go @@ -0,0 +1,55 @@ +package spec_iterator + +func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) { + if length == 0 { + return 0, 0 + } + + // We have more nodes than tests. Trivial case. 
+ if parallelTotal >= length { + if parallelNode > length { + return 0, 0 + } else { + return parallelNode - 1, 1 + } + } + + // This is the minimum amount of tests that a node will be required to run + minTestsPerNode := length / parallelTotal + + // This is the maximum amount of tests that a node will be required to run + // The algorithm guarantees that this would be equal to at least the minimum amount + // and at most one more + maxTestsPerNode := minTestsPerNode + if length%parallelTotal != 0 { + maxTestsPerNode++ + } + + // Number of nodes that will have to run the maximum amount of tests per node + numMaxLoadNodes := length % parallelTotal + + // Number of nodes that precede the current node and will have to run the maximum amount of tests per node + var numPrecedingMaxLoadNodes int + if parallelNode > numMaxLoadNodes { + numPrecedingMaxLoadNodes = numMaxLoadNodes + } else { + numPrecedingMaxLoadNodes = parallelNode - 1 + } + + // Number of nodes that precede the current node and will have to run the minimum amount of tests per node + var numPrecedingMinLoadNodes int + if parallelNode <= numMaxLoadNodes { + numPrecedingMinLoadNodes = 0 + } else { + numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1 + } + + // Evaluate the test start index and number of tests to run + startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode + if parallelNode > numMaxLoadNodes { + count = minTestsPerNode + } else { + count = maxTestsPerNode + } + return +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer_test.go new file mode 100644 index 00000000000..65da9837c68 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer_test.go @@ -0,0 +1,149 @@ +package spec_iterator_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/internal/spec_iterator" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("ParallelizedIndexRange", func() { + var startIndex, count int + + It("should return the correct index range for 4 tests on 2 nodes", func() { + startIndex, count = ParallelizedIndexRange(4, 2, 1) + Ω(startIndex).Should(Equal(0)) + Ω(count).Should(Equal(2)) + + startIndex, count = ParallelizedIndexRange(4, 2, 2) + Ω(startIndex).Should(Equal(2)) + Ω(count).Should(Equal(2)) + }) + + It("should return the correct index range for 5 tests on 2 nodes", func() { + startIndex, count = ParallelizedIndexRange(5, 2, 1) + Ω(startIndex).Should(Equal(0)) + Ω(count).Should(Equal(3)) + + startIndex, count = ParallelizedIndexRange(5, 2, 2) + Ω(startIndex).Should(Equal(3)) + Ω(count).Should(Equal(2)) + }) + + It("should return the correct index range for 5 tests on 3 nodes", func() { + startIndex, count = ParallelizedIndexRange(5, 3, 1) + Ω(startIndex).Should(Equal(0)) + Ω(count).Should(Equal(2)) + + startIndex, count = ParallelizedIndexRange(5, 3, 2) + Ω(startIndex).Should(Equal(2)) + Ω(count).Should(Equal(2)) + + startIndex, count = ParallelizedIndexRange(5, 3, 3) + Ω(startIndex).Should(Equal(4)) + Ω(count).Should(Equal(1)) + }) + + It("should return the correct index range for 5 tests on 4 nodes", func() { + startIndex, count = ParallelizedIndexRange(5, 4, 1) + Ω(startIndex).Should(Equal(0)) + Ω(count).Should(Equal(2)) + + startIndex, count = ParallelizedIndexRange(5, 4, 2) + Ω(startIndex).Should(Equal(2)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 4, 3) + Ω(startIndex).Should(Equal(3)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 4, 4) + Ω(startIndex).Should(Equal(4)) + Ω(count).Should(Equal(1)) + }) + + It("should return the correct index range for 5 tests on 5 nodes", func() { + startIndex, count = ParallelizedIndexRange(5, 5, 1) + Ω(startIndex).Should(Equal(0)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 5, 2) + Ω(startIndex).Should(Equal(1)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 5, 3) + Ω(startIndex).Should(Equal(2)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 5, 4) + Ω(startIndex).Should(Equal(3)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 5, 5) + Ω(startIndex).Should(Equal(4)) + Ω(count).Should(Equal(1)) + }) + + It("should return the correct index range for 5 tests on 6 nodes", func() { + startIndex, count = ParallelizedIndexRange(5, 6, 1) + Ω(startIndex).Should(Equal(0)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 6, 2) + Ω(startIndex).Should(Equal(1)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 6, 3) + Ω(startIndex).Should(Equal(2)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 6, 4) + Ω(startIndex).Should(Equal(3)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 6, 5) + Ω(startIndex).Should(Equal(4)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(5, 6, 6) + Ω(count).Should(Equal(0)) + }) + + It("should return the correct index range for 5 tests on 7 nodes", func() { + startIndex, count = ParallelizedIndexRange(5, 7, 6) + Ω(count).Should(Equal(0)) + + startIndex, count = ParallelizedIndexRange(5, 7, 7) + Ω(count).Should(Equal(0)) + }) + + It("should return the correct index range for 11 tests on 7 nodes", func() { + startIndex, count = ParallelizedIndexRange(11, 7, 1) + 
Ω(startIndex).Should(Equal(0)) + Ω(count).Should(Equal(2)) + + startIndex, count = ParallelizedIndexRange(11, 7, 2) + Ω(startIndex).Should(Equal(2)) + Ω(count).Should(Equal(2)) + + startIndex, count = ParallelizedIndexRange(11, 7, 3) + Ω(startIndex).Should(Equal(4)) + Ω(count).Should(Equal(2)) + + startIndex, count = ParallelizedIndexRange(11, 7, 4) + Ω(startIndex).Should(Equal(6)) + Ω(count).Should(Equal(2)) + + startIndex, count = ParallelizedIndexRange(11, 7, 5) + Ω(startIndex).Should(Equal(8)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(11, 7, 6) + Ω(startIndex).Should(Equal(9)) + Ω(count).Should(Equal(1)) + + startIndex, count = ParallelizedIndexRange(11, 7, 7) + Ω(startIndex).Should(Equal(10)) + Ω(count).Should(Equal(1)) + }) + +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go new file mode 100644 index 00000000000..54e61ecb465 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go @@ -0,0 +1,60 @@ +package spec_iterator + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + + "github.com/onsi/ginkgo/internal/spec" +) + +type ParallelIterator struct { + specs []*spec.Spec + host string + client *http.Client +} + +func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator { + return &ParallelIterator{ + specs: specs, + host: host, + client: &http.Client{}, + } +} + +func (s *ParallelIterator) Next() (*spec.Spec, error) { + resp, err := s.client.Get(s.host + "/counter") + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, errors.New(fmt.Sprintf("unexpected status code %d", resp.StatusCode)) + } + + var counter Counter + err = json.NewDecoder(resp.Body).Decode(&counter) + if err != nil { + return nil, err + } + + if counter.Index >= len(s.specs) { + return nil, ErrClosed + } + + return s.specs[counter.Index], nil +} + +func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int { + return len(s.specs) +} + +func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { + return -1, false +} + +func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { + return -1, false +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go new file mode 100644 index 00000000000..d8e05a2d60e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go @@ -0,0 +1,112 @@ +package spec_iterator_test + +import ( + "net/http" + + . "github.com/onsi/ginkgo/internal/spec_iterator" + "github.com/onsi/gomega/ghttp" + + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/containernode" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/internal/spec" + "github.com/onsi/ginkgo/types" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("ParallelSpecIterator", func() { + var specs []*spec.Spec + var iterator *ParallelIterator + var server *ghttp.Server + + newSpec := func(text string, flag types.FlagType) *spec.Spec { + subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0) + return spec.New(subject, []*containernode.ContainerNode{}, false) + } + + BeforeEach(func() { + specs = []*spec.Spec{ + newSpec("A", types.FlagTypePending), + newSpec("B", types.FlagTypeNone), + newSpec("C", types.FlagTypeNone), + newSpec("D", types.FlagTypeNone), + } + specs[3].Skip() + + server = ghttp.NewServer() + + iterator = NewParallelIterator(specs, "http://"+server.Addr()) + }) + + AfterEach(func() { + server.Close() + }) + + It("should report the total number of specs", func() { + Ω(iterator.NumberOfSpecsPriorToIteration()).Should(Equal(4)) + }) + + It("should not report the number to be processed", func() { + n, known := iterator.NumberOfSpecsToProcessIfKnown() + Ω(n).Should(Equal(-1)) + Ω(known).Should(BeFalse()) + }) + + It("should not report the number that will be run", func() { + n, known := iterator.NumberOfSpecsThatWillBeRunIfKnown() + Ω(n).Should(Equal(-1)) + Ω(known).Should(BeFalse()) + }) + + Describe("iterating", func() { + Describe("when the server returns well-formed responses", func() { + BeforeEach(func() { + server.AppendHandlers( + ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{0}), + ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{1}), + ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{3}), + ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{4}), + ) + }) + + It("should return the specs in question", func() { + Ω(iterator.Next()).Should(Equal(specs[0])) + Ω(iterator.Next()).Should(Equal(specs[1])) + Ω(iterator.Next()).Should(Equal(specs[3])) + spec, err := iterator.Next() + Ω(spec).Should(BeNil()) + Ω(err).Should(MatchError(ErrClosed)) + }) + }) + + Describe("when the server 404s", func() { + BeforeEach(func() { + server.AppendHandlers( + ghttp.RespondWith(http.StatusNotFound, ""), + ) + }) + + It("should return an error", func() { + spec, err := iterator.Next() + Ω(spec).Should(BeNil()) + Ω(err).Should(MatchError("unexpected status code 404")) + }) + }) + + Describe("when the server returns gibberish", func() { + BeforeEach(func() { + server.AppendHandlers( + ghttp.RespondWith(http.StatusOK, "ß"), + ) + }) + + It("should error", func() { + spec, err := iterator.Next() + Ω(spec).Should(BeNil()) + Ω(err).ShouldNot(BeNil()) + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go new file mode 100644 index 00000000000..a51c93b8b61 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go @@ -0,0 +1,45 @@ +package spec_iterator + +import ( + "github.com/onsi/ginkgo/internal/spec" +) + +type SerialIterator struct { + specs []*spec.Spec + index int +} + +func NewSerialIterator(specs []*spec.Spec) *SerialIterator { + return &SerialIterator{ + specs: specs, + index: 0, + } +} + +func (s *SerialIterator) Next() (*spec.Spec, error) { + if s.index >= len(s.specs) { + return nil, ErrClosed + } + + spec := s.specs[s.index] + s.index += 1 + return spec, nil +} + +func (s *SerialIterator) NumberOfSpecsPriorToIteration() int { + return len(s.specs) +} + +func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { + return len(s.specs), true +} + +func (s 
*SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { + count := 0 + for _, s := range s.specs { + if !s.Skipped() && !s.Pending() { + count += 1 + } + } + return count, true +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator_test.go new file mode 100644 index 00000000000..dde4a344e86 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator_test.go @@ -0,0 +1,64 @@ +package spec_iterator_test + +import ( + . "github.com/onsi/ginkgo/internal/spec_iterator" + + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/containernode" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/internal/spec" + "github.com/onsi/ginkgo/types" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("SerialSpecIterator", func() { + var specs []*spec.Spec + var iterator *SerialIterator + + newSpec := func(text string, flag types.FlagType) *spec.Spec { + subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0) + return spec.New(subject, []*containernode.ContainerNode{}, false) + } + + BeforeEach(func() { + specs = []*spec.Spec{ + newSpec("A", types.FlagTypePending), + newSpec("B", types.FlagTypeNone), + newSpec("C", types.FlagTypeNone), + newSpec("D", types.FlagTypeNone), + } + specs[3].Skip() + + iterator = NewSerialIterator(specs) + }) + + It("should report the total number of specs", func() { + Ω(iterator.NumberOfSpecsPriorToIteration()).Should(Equal(4)) + }) + + It("should report the number to be processed", func() { + n, known := iterator.NumberOfSpecsToProcessIfKnown() + Ω(n).Should(Equal(4)) + Ω(known).Should(BeTrue()) + }) + + It("should report the number that will be run", func() { + n, known := iterator.NumberOfSpecsThatWillBeRunIfKnown() + Ω(n).Should(Equal(2)) + Ω(known).Should(BeTrue()) + }) + + Describe("iterating", func() { + It("should return the specs in order", func() { + Ω(iterator.Next()).Should(Equal(specs[0])) + Ω(iterator.Next()).Should(Equal(specs[1])) + Ω(iterator.Next()).Should(Equal(specs[2])) + Ω(iterator.Next()).Should(Equal(specs[3])) + spec, err := iterator.Next() + Ω(spec).Should(BeNil()) + Ω(err).Should(MatchError(ErrClosed)) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go new file mode 100644 index 00000000000..ad4a3ea3c64 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go @@ -0,0 +1,47 @@ +package spec_iterator + +import "github.com/onsi/ginkgo/internal/spec" + +type ShardedParallelIterator struct { + specs []*spec.Spec + index int + maxIndex int +} + +func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator { + startIndex, count := ParallelizedIndexRange(len(specs), total, node) + + return &ShardedParallelIterator{ + specs: specs, + index: startIndex, + maxIndex: startIndex + count, + } +} + +func (s *ShardedParallelIterator) Next() (*spec.Spec, error) { + if s.index >= s.maxIndex { + return nil, ErrClosed + } + + spec := s.specs[s.index] + s.index += 1 + return spec, nil +} + +func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int { + return len(s.specs) +} + +func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { + 
return s.maxIndex - s.index, true +} + +func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { + count := 0 + for i := s.index; i < s.maxIndex; i += 1 { + if !s.specs[i].Skipped() && !s.specs[i].Pending() { + count += 1 + } + } + return count, true +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator_test.go new file mode 100644 index 00000000000..c3786e03aa8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator_test.go @@ -0,0 +1,62 @@ +package spec_iterator_test + +import ( + . "github.com/onsi/ginkgo/internal/spec_iterator" + + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/containernode" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/internal/spec" + "github.com/onsi/ginkgo/types" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("ShardedParallelSpecIterator", func() { + var specs []*spec.Spec + var iterator *ShardedParallelIterator + + newSpec := func(text string, flag types.FlagType) *spec.Spec { + subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0) + return spec.New(subject, []*containernode.ContainerNode{}, false) + } + + BeforeEach(func() { + specs = []*spec.Spec{ + newSpec("A", types.FlagTypePending), + newSpec("B", types.FlagTypeNone), + newSpec("C", types.FlagTypeNone), + newSpec("D", types.FlagTypeNone), + } + specs[3].Skip() + + iterator = NewShardedParallelIterator(specs, 2, 1) + }) + + It("should report the total number of specs", func() { + Ω(iterator.NumberOfSpecsPriorToIteration()).Should(Equal(4)) + }) + + It("should report the number to be processed", func() { + n, known := iterator.NumberOfSpecsToProcessIfKnown() + Ω(n).Should(Equal(2)) + Ω(known).Should(BeTrue()) + }) + + It("should report the number that will be run", func() { + n, known := iterator.NumberOfSpecsThatWillBeRunIfKnown() + Ω(n).Should(Equal(1)) + Ω(known).Should(BeTrue()) + }) + + Describe("iterating", func() { + It("should return the specs in order", func() { + Ω(iterator.Next()).Should(Equal(specs[0])) + Ω(iterator.Next()).Should(Equal(specs[1])) + spec, err := iterator.Next() + Ω(spec).Should(BeNil()) + Ω(err).Should(MatchError(ErrClosed)) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go new file mode 100644 index 00000000000..74bffad6464 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go @@ -0,0 +1,20 @@ +package spec_iterator + +import ( + "errors" + + "github.com/onsi/ginkgo/internal/spec" +) + +var ErrClosed = errors.New("no more specs to run") + +type SpecIterator interface { + Next() (*spec.Spec, error) + NumberOfSpecsPriorToIteration() int + NumberOfSpecsToProcessIfKnown() (int, bool) + NumberOfSpecsThatWillBeRunIfKnown() (int, bool) +} + +type Counter struct { + Index int `json:"index"` +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator_suite_test.go new file mode 100644 index 00000000000..5c08a77e369 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator_suite_test.go @@ -0,0 +1,13 @@ +package spec_iterator_test + +import ( + . 
"github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestSpecIterator(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "SpecIterator Suite") +} diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go new file mode 100644 index 00000000000..a0b8b62d525 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go @@ -0,0 +1,15 @@ +package specrunner + +import ( + "crypto/rand" + "fmt" +) + +func randomID() string { + b := make([]byte, 8) + _, err := rand.Read(b) + if err != nil { + return "" + } + return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8]) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go new file mode 100644 index 00000000000..d4dd909ecf5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go @@ -0,0 +1,408 @@ +package specrunner + +import ( + "fmt" + "os" + "os/signal" + "sync" + "syscall" + + "github.com/onsi/ginkgo/internal/spec_iterator" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/internal/spec" + Writer "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" + + "time" +) + +type SpecRunner struct { + description string + beforeSuiteNode leafnodes.SuiteNode + iterator spec_iterator.SpecIterator + afterSuiteNode leafnodes.SuiteNode + reporters []reporters.Reporter + startTime time.Time + suiteID string + runningSpec *spec.Spec + writer Writer.WriterInterface + config config.GinkgoConfigType + interrupted bool + processedSpecs []*spec.Spec + lock *sync.Mutex +} + +func New(description string, beforeSuiteNode leafnodes.SuiteNode, iterator spec_iterator.SpecIterator, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner { + return &SpecRunner{ + description: description, + beforeSuiteNode: beforeSuiteNode, + iterator: iterator, + afterSuiteNode: afterSuiteNode, + reporters: reporters, + writer: writer, + config: config, + suiteID: randomID(), + lock: &sync.Mutex{}, + } +} + +func (runner *SpecRunner) Run() bool { + if runner.config.DryRun { + runner.performDryRun() + return true + } + + runner.reportSuiteWillBegin() + go runner.registerForInterrupts() + + suitePassed := runner.runBeforeSuite() + + if suitePassed { + suitePassed = runner.runSpecs() + } + + runner.blockForeverIfInterrupted() + + suitePassed = runner.runAfterSuite() && suitePassed + + runner.reportSuiteDidEnd(suitePassed) + + return suitePassed +} + +func (runner *SpecRunner) performDryRun() { + runner.reportSuiteWillBegin() + + if runner.beforeSuiteNode != nil { + summary := runner.beforeSuiteNode.Summary() + summary.State = types.SpecStatePassed + runner.reportBeforeSuite(summary) + } + + for { + spec, err := runner.iterator.Next() + if err == spec_iterator.ErrClosed { + break + } + if err != nil { + fmt.Println("failed to iterate over tests:\n" + err.Error()) + break + } + + runner.processedSpecs = append(runner.processedSpecs, spec) + + summary := spec.Summary(runner.suiteID) + runner.reportSpecWillRun(summary) + if summary.State == types.SpecStateInvalid { + summary.State = types.SpecStatePassed + } + runner.reportSpecDidComplete(summary, false) + } + + if runner.afterSuiteNode != nil { + summary := 
runner.afterSuiteNode.Summary() + summary.State = types.SpecStatePassed + runner.reportAfterSuite(summary) + } + + runner.reportSuiteDidEnd(true) +} + +func (runner *SpecRunner) runBeforeSuite() bool { + if runner.beforeSuiteNode == nil || runner.wasInterrupted() { + return true + } + + runner.writer.Truncate() + conf := runner.config + passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost) + if !passed { + runner.writer.DumpOut() + } + runner.reportBeforeSuite(runner.beforeSuiteNode.Summary()) + return passed +} + +func (runner *SpecRunner) runAfterSuite() bool { + if runner.afterSuiteNode == nil { + return true + } + + runner.writer.Truncate() + conf := runner.config + passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost) + if !passed { + runner.writer.DumpOut() + } + runner.reportAfterSuite(runner.afterSuiteNode.Summary()) + return passed +} + +func (runner *SpecRunner) runSpecs() bool { + suiteFailed := false + skipRemainingSpecs := false + for { + spec, err := runner.iterator.Next() + if err == spec_iterator.ErrClosed { + break + } + if err != nil { + fmt.Println("failed to iterate over tests:\n" + err.Error()) + suiteFailed = true + break + } + + runner.processedSpecs = append(runner.processedSpecs, spec) + + if runner.wasInterrupted() { + break + } + if skipRemainingSpecs { + spec.Skip() + } + + if !spec.Skipped() && !spec.Pending() { + if passed := runner.runSpec(spec); !passed { + suiteFailed = true + } + } else if spec.Pending() && runner.config.FailOnPending { + runner.reportSpecWillRun(spec.Summary(runner.suiteID)) + suiteFailed = true + runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed()) + } else { + runner.reportSpecWillRun(spec.Summary(runner.suiteID)) + runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed()) + } + + if spec.Failed() && runner.config.FailFast { + skipRemainingSpecs = true + } + } + + return !suiteFailed +} + +func (runner *SpecRunner) runSpec(spec *spec.Spec) (passed bool) { + maxAttempts := 1 + if runner.config.FlakeAttempts > 0 { + // uninitialized configs count as 1 + maxAttempts = runner.config.FlakeAttempts + } + + for i := 0; i < maxAttempts; i++ { + runner.reportSpecWillRun(spec.Summary(runner.suiteID)) + runner.runningSpec = spec + spec.Run(runner.writer) + runner.runningSpec = nil + runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed()) + if !spec.Failed() { + return true + } + } + return false +} + +func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) { + if runner.runningSpec == nil { + return nil, false + } + + return runner.runningSpec.Summary(runner.suiteID), true +} + +func (runner *SpecRunner) registerForInterrupts() { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + + <-c + signal.Stop(c) + runner.markInterrupted() + go runner.registerForHardInterrupts() + runner.writer.DumpOutWithHeader(` +Received interrupt. Emitting contents of GinkgoWriter... +--------------------------------------------------------- +`) + if runner.afterSuiteNode != nil { + fmt.Fprint(os.Stderr, ` +--------------------------------------------------------- +Received interrupt. Running AfterSuite... 
+^C again to terminate immediately +`) + runner.runAfterSuite() + } + runner.reportSuiteDidEnd(false) + os.Exit(1) +} + +func (runner *SpecRunner) registerForHardInterrupts() { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + + <-c + fmt.Fprintln(os.Stderr, "\nReceived second interrupt. Shutting down.") + os.Exit(1) +} + +func (runner *SpecRunner) blockForeverIfInterrupted() { + runner.lock.Lock() + interrupted := runner.interrupted + runner.lock.Unlock() + + if interrupted { + select {} + } +} + +func (runner *SpecRunner) markInterrupted() { + runner.lock.Lock() + defer runner.lock.Unlock() + runner.interrupted = true +} + +func (runner *SpecRunner) wasInterrupted() bool { + runner.lock.Lock() + defer runner.lock.Unlock() + return runner.interrupted +} + +func (runner *SpecRunner) reportSuiteWillBegin() { + runner.startTime = time.Now() + summary := runner.suiteWillBeginSummary() + for _, reporter := range runner.reporters { + reporter.SpecSuiteWillBegin(runner.config, summary) + } +} + +func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) { + for _, reporter := range runner.reporters { + reporter.BeforeSuiteDidRun(summary) + } +} + +func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) { + for _, reporter := range runner.reporters { + reporter.AfterSuiteDidRun(summary) + } +} + +func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) { + runner.writer.Truncate() + + for _, reporter := range runner.reporters { + reporter.SpecWillRun(summary) + } +} + +func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) { + if failed && len(summary.CapturedOutput) == 0 { + summary.CapturedOutput = string(runner.writer.Bytes()) + } + for i := len(runner.reporters) - 1; i >= 1; i-- { + runner.reporters[i].SpecDidComplete(summary) + } + + if failed { + runner.writer.DumpOut() + } + + runner.reporters[0].SpecDidComplete(summary) +} + +func (runner *SpecRunner) reportSuiteDidEnd(success bool) { + summary := runner.suiteDidEndSummary(success) + summary.RunTime = time.Since(runner.startTime) + for _, reporter := range runner.reporters { + reporter.SpecSuiteDidEnd(summary) + } +} + +func (runner *SpecRunner) countSpecsThatRanSatisfying(filter func(ex *spec.Spec) bool) (count int) { + count = 0 + + for _, spec := range runner.processedSpecs { + if filter(spec) { + count++ + } + } + + return count +} + +func (runner *SpecRunner) suiteDidEndSummary(success bool) *types.SuiteSummary { + numberOfSpecsThatWillBeRun := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return !ex.Skipped() && !ex.Pending() + }) + + numberOfPendingSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Pending() + }) + + numberOfSkippedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Skipped() + }) + + numberOfPassedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Passed() + }) + + numberOfFlakedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Flaked() + }) + + numberOfFailedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Failed() + }) + + if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun { + var known bool + numberOfSpecsThatWillBeRun, known = runner.iterator.NumberOfSpecsThatWillBeRunIfKnown() + if !known { + numberOfSpecsThatWillBeRun = runner.iterator.NumberOfSpecsPriorToIteration() + } + 
numberOfFailedSpecs = numberOfSpecsThatWillBeRun + } + + return &types.SuiteSummary{ + SuiteDescription: runner.description, + SuiteSucceeded: success, + SuiteID: runner.suiteID, + + NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(), + NumberOfTotalSpecs: len(runner.processedSpecs), + NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun, + NumberOfPendingSpecs: numberOfPendingSpecs, + NumberOfSkippedSpecs: numberOfSkippedSpecs, + NumberOfPassedSpecs: numberOfPassedSpecs, + NumberOfFailedSpecs: numberOfFailedSpecs, + NumberOfFlakedSpecs: numberOfFlakedSpecs, + } +} + +func (runner *SpecRunner) suiteWillBeginSummary() *types.SuiteSummary { + numTotal, known := runner.iterator.NumberOfSpecsToProcessIfKnown() + if !known { + numTotal = -1 + } + + numToRun, known := runner.iterator.NumberOfSpecsThatWillBeRunIfKnown() + if !known { + numToRun = -1 + } + + return &types.SuiteSummary{ + SuiteDescription: runner.description, + SuiteID: runner.suiteID, + + NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(), + NumberOfTotalSpecs: numTotal, + NumberOfSpecsThatWillBeRun: numToRun, + NumberOfPendingSpecs: -1, + NumberOfSkippedSpecs: -1, + NumberOfPassedSpecs: -1, + NumberOfFailedSpecs: -1, + NumberOfFlakedSpecs: -1, + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_suite_test.go new file mode 100644 index 00000000000..c8388fb6f7d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_suite_test.go @@ -0,0 +1,13 @@ +package specrunner_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestSpecRunner(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Spec Runner Suite") +} diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_test.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_test.go new file mode 100644 index 00000000000..77272e8b868 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_test.go @@ -0,0 +1,779 @@ +package specrunner_test + +import ( + . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/internal/spec_iterator" + . "github.com/onsi/ginkgo/internal/specrunner" + "github.com/onsi/ginkgo/types" + . 
"github.com/onsi/gomega" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/containernode" + Failer "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/internal/spec" + Writer "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" +) + +var noneFlag = types.FlagTypeNone +var focusedFlag = types.FlagTypeFocused +var pendingFlag = types.FlagTypePending + +var _ = Describe("Spec Runner", func() { + var ( + reporter1 *reporters.FakeReporter + reporter2 *reporters.FakeReporter + failer *Failer.Failer + writer *Writer.FakeGinkgoWriter + + thingsThatRan []string + + runner *SpecRunner + ) + + newBefSuite := func(text string, fail bool) leafnodes.SuiteNode { + return leafnodes.NewBeforeSuiteNode(func() { + writer.AddEvent(text) + thingsThatRan = append(thingsThatRan, text) + if fail { + failer.Fail(text, codelocation.New(0)) + } + }, codelocation.New(0), 0, failer) + } + + newAftSuite := func(text string, fail bool) leafnodes.SuiteNode { + return leafnodes.NewAfterSuiteNode(func() { + writer.AddEvent(text) + thingsThatRan = append(thingsThatRan, text) + if fail { + failer.Fail(text, codelocation.New(0)) + } + }, codelocation.New(0), 0, failer) + } + + newSpec := func(text string, flag types.FlagType, fail bool) *spec.Spec { + subject := leafnodes.NewItNode(text, func() { + writer.AddEvent(text) + thingsThatRan = append(thingsThatRan, text) + if fail { + failer.Fail(text, codelocation.New(0)) + } + }, flag, codelocation.New(0), 0, failer, 0) + + return spec.New(subject, []*containernode.ContainerNode{}, false) + } + + newFlakySpec := func(text string, flag types.FlagType, failures int) *spec.Spec { + runs := 0 + subject := leafnodes.NewItNode(text, func() { + writer.AddEvent(text) + thingsThatRan = append(thingsThatRan, text) + runs++ + if runs < failures { + failer.Fail(text, codelocation.New(0)) + } + }, flag, codelocation.New(0), 0, failer, 0) + + return spec.New(subject, []*containernode.ContainerNode{}, false) + } + + newSpecWithBody := func(text string, body interface{}) *spec.Spec { + subject := leafnodes.NewItNode(text, body, noneFlag, codelocation.New(0), 0, failer, 0) + + return spec.New(subject, []*containernode.ContainerNode{}, false) + } + + newRunner := func(config config.GinkgoConfigType, beforeSuiteNode leafnodes.SuiteNode, afterSuiteNode leafnodes.SuiteNode, specs ...*spec.Spec) *SpecRunner { + iterator := spec_iterator.NewSerialIterator(specs) + return New("description", beforeSuiteNode, iterator, afterSuiteNode, []reporters.Reporter{reporter1, reporter2}, writer, config) + } + + BeforeEach(func() { + reporter1 = reporters.NewFakeReporter() + reporter2 = reporters.NewFakeReporter() + writer = Writer.NewFake() + failer = Failer.New() + + thingsThatRan = []string{} + }) + + Describe("Running and Reporting", func() { + var specA, pendingSpec, anotherPendingSpec, failedSpec, specB, skippedSpec *spec.Spec + var willRunCalls, didCompleteCalls []string + var conf config.GinkgoConfigType + + JustBeforeEach(func() { + willRunCalls = []string{} + didCompleteCalls = []string{} + specA = newSpec("spec A", noneFlag, false) + pendingSpec = newSpec("pending spec", pendingFlag, false) + anotherPendingSpec = newSpec("another pending spec", pendingFlag, false) + failedSpec = newSpec("failed spec", noneFlag, true) + specB = newSpec("spec B", noneFlag, false) + skippedSpec = newSpec("skipped spec", noneFlag, false) + skippedSpec.Skip() + + 
reporter1.SpecWillRunStub = func(specSummary *types.SpecSummary) { + willRunCalls = append(willRunCalls, "Reporter1") + } + reporter2.SpecWillRunStub = func(specSummary *types.SpecSummary) { + willRunCalls = append(willRunCalls, "Reporter2") + } + + reporter1.SpecDidCompleteStub = func(specSummary *types.SpecSummary) { + didCompleteCalls = append(didCompleteCalls, "Reporter1") + } + reporter2.SpecDidCompleteStub = func(specSummary *types.SpecSummary) { + didCompleteCalls = append(didCompleteCalls, "Reporter2") + } + + runner = newRunner(conf, newBefSuite("BefSuite", false), newAftSuite("AftSuite", false), specA, pendingSpec, anotherPendingSpec, failedSpec, specB, skippedSpec) + runner.Run() + }) + + BeforeEach(func() { + conf = config.GinkgoConfigType{RandomSeed: 17} + }) + + It("should skip skipped/pending tests", func() { + Ω(thingsThatRan).Should(Equal([]string{"BefSuite", "spec A", "failed spec", "spec B", "AftSuite"})) + }) + + It("should report to any attached reporters", func() { + Ω(reporter1.Config).Should(Equal(reporter2.Config)) + Ω(reporter1.BeforeSuiteSummary).Should(Equal(reporter2.BeforeSuiteSummary)) + Ω(reporter1.BeginSummary).Should(Equal(reporter2.BeginSummary)) + Ω(reporter1.SpecWillRunSummaries).Should(Equal(reporter2.SpecWillRunSummaries)) + Ω(reporter1.SpecSummaries).Should(Equal(reporter2.SpecSummaries)) + Ω(reporter1.AfterSuiteSummary).Should(Equal(reporter2.AfterSuiteSummary)) + Ω(reporter1.EndSummary).Should(Equal(reporter2.EndSummary)) + }) + + It("should report that a spec did end in reverse order", func() { + Ω(willRunCalls[0:4]).Should(Equal([]string{"Reporter1", "Reporter2", "Reporter1", "Reporter2"})) + Ω(didCompleteCalls[0:4]).Should(Equal([]string{"Reporter2", "Reporter1", "Reporter2", "Reporter1"})) + }) + + It("should report the passed in config", func() { + Ω(reporter1.Config.RandomSeed).Should(BeNumerically("==", 17)) + }) + + It("should report the beginning of the suite", func() { + Ω(reporter1.BeginSummary.SuiteDescription).Should(Equal("description")) + Ω(reporter1.BeginSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}")) + Ω(reporter1.BeginSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6)) + Ω(reporter1.BeginSummary.NumberOfTotalSpecs).Should(Equal(6)) + Ω(reporter1.BeginSummary.NumberOfSpecsThatWillBeRun).Should(Equal(3)) + Ω(reporter1.BeginSummary.NumberOfPendingSpecs).Should(Equal(-1)) + Ω(reporter1.BeginSummary.NumberOfSkippedSpecs).Should(Equal(-1)) + }) + + It("should report the end of the suite", func() { + Ω(reporter1.EndSummary.SuiteDescription).Should(Equal("description")) + Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse()) + Ω(reporter1.EndSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}")) + Ω(reporter1.EndSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6)) + Ω(reporter1.EndSummary.NumberOfTotalSpecs).Should(Equal(6)) + Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(3)) + Ω(reporter1.EndSummary.NumberOfPendingSpecs).Should(Equal(2)) + Ω(reporter1.EndSummary.NumberOfSkippedSpecs).Should(Equal(1)) + Ω(reporter1.EndSummary.NumberOfPassedSpecs).Should(Equal(2)) + Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(1)) + }) + + Context("when told to perform a dry run", func() { + BeforeEach(func() { + conf.DryRun = true + }) + + It("should report to the reporters", func() { + Ω(reporter1.Config).Should(Equal(reporter2.Config)) + Ω(reporter1.BeforeSuiteSummary).Should(Equal(reporter2.BeforeSuiteSummary)) + 
Ω(reporter1.BeginSummary).Should(Equal(reporter2.BeginSummary)) + Ω(reporter1.SpecWillRunSummaries).Should(Equal(reporter2.SpecWillRunSummaries)) + Ω(reporter1.SpecSummaries).Should(Equal(reporter2.SpecSummaries)) + Ω(reporter1.AfterSuiteSummary).Should(Equal(reporter2.AfterSuiteSummary)) + Ω(reporter1.EndSummary).Should(Equal(reporter2.EndSummary)) + }) + + It("should not actually run anything", func() { + Ω(thingsThatRan).Should(BeEmpty()) + }) + + It("report before and after suites as passed", func() { + Ω(reporter1.BeforeSuiteSummary.State).Should(Equal(types.SpecStatePassed)) + Ω(reporter1.AfterSuiteSummary.State).Should(Equal(types.SpecStatePassed)) + }) + + It("should report specs as passed", func() { + summaries := reporter1.SpecSummaries + Ω(summaries).Should(HaveLen(6)) + Ω(summaries[0].ComponentTexts).Should(ContainElement("spec A")) + Ω(summaries[0].State).Should(Equal(types.SpecStatePassed)) + Ω(summaries[1].ComponentTexts).Should(ContainElement("pending spec")) + Ω(summaries[1].State).Should(Equal(types.SpecStatePending)) + Ω(summaries[2].ComponentTexts).Should(ContainElement("another pending spec")) + Ω(summaries[2].State).Should(Equal(types.SpecStatePending)) + Ω(summaries[3].ComponentTexts).Should(ContainElement("failed spec")) + Ω(summaries[3].State).Should(Equal(types.SpecStatePassed)) + Ω(summaries[4].ComponentTexts).Should(ContainElement("spec B")) + Ω(summaries[4].State).Should(Equal(types.SpecStatePassed)) + Ω(summaries[5].ComponentTexts).Should(ContainElement("skipped spec")) + Ω(summaries[5].State).Should(Equal(types.SpecStateSkipped)) + }) + + It("should report the end of the suite", func() { + Ω(reporter1.EndSummary.SuiteDescription).Should(Equal("description")) + Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeTrue()) + Ω(reporter1.EndSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}")) + Ω(reporter1.EndSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6)) + Ω(reporter1.EndSummary.NumberOfTotalSpecs).Should(Equal(6)) + Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(3)) + Ω(reporter1.EndSummary.NumberOfPendingSpecs).Should(Equal(2)) + Ω(reporter1.EndSummary.NumberOfSkippedSpecs).Should(Equal(1)) + Ω(reporter1.EndSummary.NumberOfPassedSpecs).Should(Equal(0)) + Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(0)) + }) + }) + }) + + Describe("reporting on specs", func() { + var proceed chan bool + var ready chan bool + var finished chan bool + BeforeEach(func() { + ready = make(chan bool) + proceed = make(chan bool) + finished = make(chan bool) + skippedSpec := newSpec("SKIP", noneFlag, false) + skippedSpec.Skip() + + runner = newRunner( + config.GinkgoConfigType{}, + newBefSuite("BefSuite", false), + newAftSuite("AftSuite", false), + skippedSpec, + newSpec("PENDING", pendingFlag, false), + newSpecWithBody("RUN", func() { + close(ready) + <-proceed + }), + ) + go func() { + runner.Run() + close(finished) + }() + }) + + It("should report about pending/skipped specs", func() { + <-ready + Ω(reporter1.SpecWillRunSummaries).Should(HaveLen(3)) + + Ω(reporter1.SpecWillRunSummaries[0].ComponentTexts[0]).Should(Equal("SKIP")) + Ω(reporter1.SpecWillRunSummaries[1].ComponentTexts[0]).Should(Equal("PENDING")) + Ω(reporter1.SpecWillRunSummaries[2].ComponentTexts[0]).Should(Equal("RUN")) + + Ω(reporter1.SpecSummaries[0].ComponentTexts[0]).Should(Equal("SKIP")) + Ω(reporter1.SpecSummaries[1].ComponentTexts[0]).Should(Equal("PENDING")) + Ω(reporter1.SpecSummaries).Should(HaveLen(2)) + + close(proceed) + <-finished 
+ + Ω(reporter1.SpecSummaries).Should(HaveLen(3)) + Ω(reporter1.SpecSummaries[2].ComponentTexts[0]).Should(Equal("RUN")) + }) + }) + + Describe("Running and Reporting when there's flakes", func() { + var specA, pendingSpec, flakySpec, failedSpec, specB, skippedSpec *spec.Spec + var willRunCalls, didCompleteCalls []string + var conf config.GinkgoConfigType + var failedSpecFlag = noneFlag + + JustBeforeEach(func() { + willRunCalls = []string{} + didCompleteCalls = []string{} + specA = newSpec("spec A", noneFlag, false) + pendingSpec = newSpec("pending spec", pendingFlag, false) + flakySpec = newFlakySpec("flaky spec", noneFlag, 3) + failedSpec = newSpec("failed spec", failedSpecFlag, true) + specB = newSpec("spec B", noneFlag, false) + skippedSpec = newSpec("skipped spec", noneFlag, false) + skippedSpec.Skip() + + reporter1.SpecWillRunStub = func(specSummary *types.SpecSummary) { + willRunCalls = append(willRunCalls, "Reporter1") + } + reporter2.SpecWillRunStub = func(specSummary *types.SpecSummary) { + willRunCalls = append(willRunCalls, "Reporter2") + } + + reporter1.SpecDidCompleteStub = func(specSummary *types.SpecSummary) { + didCompleteCalls = append(didCompleteCalls, "Reporter1") + } + reporter2.SpecDidCompleteStub = func(specSummary *types.SpecSummary) { + didCompleteCalls = append(didCompleteCalls, "Reporter2") + } + + runner = newRunner(conf, newBefSuite("BefSuite", false), newAftSuite("AftSuite", false), specA, pendingSpec, flakySpec, failedSpec, specB, skippedSpec) + runner.Run() + }) + + BeforeEach(func() { + failedSpecFlag = noneFlag + conf = config.GinkgoConfigType{ + RandomSeed: 17, + FlakeAttempts: 5, + } + }) + + It("should skip skipped/pending tests", func() { + Ω(thingsThatRan).Should(Equal([]string{"BefSuite", "spec A", "flaky spec", "flaky spec", "flaky spec", "failed spec", "failed spec", "failed spec", "failed spec", "failed spec", "spec B", "AftSuite"})) + }) + + It("should report to any attached reporters", func() { + Ω(reporter1.Config).Should(Equal(reporter2.Config)) + Ω(reporter1.BeforeSuiteSummary).Should(Equal(reporter2.BeforeSuiteSummary)) + Ω(reporter1.BeginSummary).Should(Equal(reporter2.BeginSummary)) + Ω(reporter1.SpecWillRunSummaries).Should(Equal(reporter2.SpecWillRunSummaries)) + Ω(reporter1.SpecSummaries).Should(Equal(reporter2.SpecSummaries)) + Ω(reporter1.AfterSuiteSummary).Should(Equal(reporter2.AfterSuiteSummary)) + Ω(reporter1.EndSummary).Should(Equal(reporter2.EndSummary)) + }) + + It("should report that a spec did end in reverse order", func() { + Ω(willRunCalls[0:4]).Should(Equal([]string{"Reporter1", "Reporter2", "Reporter1", "Reporter2"})) + Ω(didCompleteCalls[0:4]).Should(Equal([]string{"Reporter2", "Reporter1", "Reporter2", "Reporter1"})) + }) + + It("should report the passed in config", func() { + Ω(reporter1.Config.RandomSeed).Should(BeNumerically("==", 17)) + }) + + It("should report the beginning of the suite", func() { + Ω(reporter1.BeginSummary.SuiteDescription).Should(Equal("description")) + Ω(reporter1.BeginSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}")) + Ω(reporter1.BeginSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6)) + Ω(reporter1.BeginSummary.NumberOfTotalSpecs).Should(Equal(6)) + Ω(reporter1.BeginSummary.NumberOfSpecsThatWillBeRun).Should(Equal(4)) + Ω(reporter1.BeginSummary.NumberOfPendingSpecs).Should(Equal(-1)) + Ω(reporter1.BeginSummary.NumberOfSkippedSpecs).Should(Equal(-1)) + }) + + It("should report the end of the suite", func() { + 
Ω(reporter1.EndSummary.SuiteDescription).Should(Equal("description")) + Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse()) + Ω(reporter1.EndSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}")) + Ω(reporter1.EndSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6)) + Ω(reporter1.EndSummary.NumberOfTotalSpecs).Should(Equal(6)) + Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(4)) + Ω(reporter1.EndSummary.NumberOfPendingSpecs).Should(Equal(1)) + Ω(reporter1.EndSummary.NumberOfSkippedSpecs).Should(Equal(1)) + Ω(reporter1.EndSummary.NumberOfPassedSpecs).Should(Equal(3)) + Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(1)) + Ω(reporter1.EndSummary.NumberOfFlakedSpecs).Should(Equal(1)) + }) + + Context("when nothing fails", func() { + BeforeEach(func() { + failedSpecFlag = pendingFlag + }) + + It("the suite should pass even with flakes", func() { + Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeTrue()) + Ω(reporter1.EndSummary.NumberOfFlakedSpecs).Should(Equal(1)) + }) + }) + + Context("when told to perform a dry run", func() { + BeforeEach(func() { + conf.DryRun = true + }) + + It("should report to the reporters", func() { + Ω(reporter1.Config).Should(Equal(reporter2.Config)) + Ω(reporter1.BeforeSuiteSummary).Should(Equal(reporter2.BeforeSuiteSummary)) + Ω(reporter1.BeginSummary).Should(Equal(reporter2.BeginSummary)) + Ω(reporter1.SpecWillRunSummaries).Should(Equal(reporter2.SpecWillRunSummaries)) + Ω(reporter1.SpecSummaries).Should(Equal(reporter2.SpecSummaries)) + Ω(reporter1.AfterSuiteSummary).Should(Equal(reporter2.AfterSuiteSummary)) + Ω(reporter1.EndSummary).Should(Equal(reporter2.EndSummary)) + }) + + It("should not actually run anything", func() { + Ω(thingsThatRan).Should(BeEmpty()) + }) + + It("report before and after suites as passed", func() { + Ω(reporter1.BeforeSuiteSummary.State).Should(Equal(types.SpecStatePassed)) + Ω(reporter1.AfterSuiteSummary.State).Should(Equal(types.SpecStatePassed)) + }) + + It("should report specs as passed", func() { + summaries := reporter1.SpecSummaries + Ω(summaries).Should(HaveLen(6)) + Ω(summaries[0].ComponentTexts).Should(ContainElement("spec A")) + Ω(summaries[0].State).Should(Equal(types.SpecStatePassed)) + Ω(summaries[1].ComponentTexts).Should(ContainElement("pending spec")) + Ω(summaries[1].State).Should(Equal(types.SpecStatePending)) + Ω(summaries[2].ComponentTexts).Should(ContainElement("flaky spec")) + Ω(summaries[2].State).Should(Equal(types.SpecStatePassed)) + Ω(summaries[3].ComponentTexts).Should(ContainElement("failed spec")) + Ω(summaries[3].State).Should(Equal(types.SpecStatePassed)) + Ω(summaries[4].ComponentTexts).Should(ContainElement("spec B")) + Ω(summaries[4].State).Should(Equal(types.SpecStatePassed)) + Ω(summaries[5].ComponentTexts).Should(ContainElement("skipped spec")) + Ω(summaries[5].State).Should(Equal(types.SpecStateSkipped)) + }) + + It("should report the end of the suite", func() { + Ω(reporter1.EndSummary.SuiteDescription).Should(Equal("description")) + Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeTrue()) + Ω(reporter1.EndSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}")) + Ω(reporter1.EndSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6)) + Ω(reporter1.EndSummary.NumberOfTotalSpecs).Should(Equal(6)) + Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(4)) + Ω(reporter1.EndSummary.NumberOfPendingSpecs).Should(Equal(1)) + Ω(reporter1.EndSummary.NumberOfSkippedSpecs).Should(Equal(1)) + 
Ω(reporter1.EndSummary.NumberOfPassedSpecs).Should(Equal(0)) + Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(0)) + }) + }) + }) + + Describe("Running BeforeSuite & AfterSuite", func() { + var success bool + var befSuite leafnodes.SuiteNode + var aftSuite leafnodes.SuiteNode + Context("with a nil BeforeSuite & AfterSuite", func() { + BeforeEach(func() { + runner = newRunner( + config.GinkgoConfigType{}, + nil, + nil, + newSpec("A", noneFlag, false), + newSpec("B", noneFlag, false), + ) + success = runner.Run() + }) + + It("should not report about the BeforeSuite", func() { + Ω(reporter1.BeforeSuiteSummary).Should(BeNil()) + }) + + It("should not report about the AfterSuite", func() { + Ω(reporter1.AfterSuiteSummary).Should(BeNil()) + }) + + It("should run the specs", func() { + Ω(thingsThatRan).Should(Equal([]string{"A", "B"})) + }) + }) + + Context("when the BeforeSuite & AfterSuite pass", func() { + BeforeEach(func() { + befSuite = newBefSuite("BefSuite", false) + aftSuite = newBefSuite("AftSuite", false) + runner = newRunner( + config.GinkgoConfigType{}, + befSuite, + aftSuite, + newSpec("A", noneFlag, false), + newSpec("B", noneFlag, false), + ) + success = runner.Run() + }) + + It("should run the BeforeSuite, the AfterSuite and the specs", func() { + Ω(thingsThatRan).Should(Equal([]string{"BefSuite", "A", "B", "AftSuite"})) + }) + + It("should report about the BeforeSuite", func() { + Ω(reporter1.BeforeSuiteSummary).Should(Equal(befSuite.Summary())) + }) + + It("should report about the AfterSuite", func() { + Ω(reporter1.AfterSuiteSummary).Should(Equal(aftSuite.Summary())) + }) + + It("should report success", func() { + Ω(success).Should(BeTrue()) + Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeTrue()) + Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(0)) + }) + + It("should not dump the writer", func() { + Ω(writer.EventStream).ShouldNot(ContainElement("DUMP")) + }) + }) + + Context("when the BeforeSuite fails", func() { + BeforeEach(func() { + befSuite = newBefSuite("BefSuite", true) + aftSuite = newBefSuite("AftSuite", false) + + skipped := newSpec("Skipped", noneFlag, false) + skipped.Skip() + + runner = newRunner( + config.GinkgoConfigType{}, + befSuite, + aftSuite, + newSpec("A", noneFlag, false), + newSpec("B", noneFlag, false), + newSpec("Pending", pendingFlag, false), + skipped, + ) + success = runner.Run() + }) + + It("should not run the specs, but it should run the AfterSuite", func() { + Ω(thingsThatRan).Should(Equal([]string{"BefSuite", "AftSuite"})) + }) + + It("should report about the BeforeSuite", func() { + Ω(reporter1.BeforeSuiteSummary).Should(Equal(befSuite.Summary())) + }) + + It("should report about the AfterSuite", func() { + Ω(reporter1.AfterSuiteSummary).Should(Equal(aftSuite.Summary())) + }) + + It("should report failure", func() { + Ω(success).Should(BeFalse()) + Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse()) + Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(2)) + Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(2)) + }) + + It("should dump the writer", func() { + Ω(writer.EventStream).Should(ContainElement("DUMP")) + }) + }) + + Context("when some other test fails", func() { + BeforeEach(func() { + aftSuite = newBefSuite("AftSuite", false) + + runner = newRunner( + config.GinkgoConfigType{}, + nil, + aftSuite, + newSpec("A", noneFlag, true), + ) + success = runner.Run() + }) + + It("should still run the AfterSuite", func() { + Ω(thingsThatRan).Should(Equal([]string{"A", "AftSuite"})) + }) + + It("should 
report about the AfterSuite", func() { + Ω(reporter1.AfterSuiteSummary).Should(Equal(aftSuite.Summary())) + }) + + It("should report failure", func() { + Ω(success).Should(BeFalse()) + Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse()) + Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(1)) + Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(1)) + }) + }) + + Context("when the AfterSuite fails", func() { + BeforeEach(func() { + befSuite = newBefSuite("BefSuite", false) + aftSuite = newBefSuite("AftSuite", true) + runner = newRunner( + config.GinkgoConfigType{}, + befSuite, + aftSuite, + newSpec("A", noneFlag, false), + newSpec("B", noneFlag, false), + ) + success = runner.Run() + }) + + It("should run everything", func() { + Ω(thingsThatRan).Should(Equal([]string{"BefSuite", "A", "B", "AftSuite"})) + }) + + It("should report about the BeforeSuite", func() { + Ω(reporter1.BeforeSuiteSummary).Should(Equal(befSuite.Summary())) + }) + + It("should report about the AfterSuite", func() { + Ω(reporter1.AfterSuiteSummary).Should(Equal(aftSuite.Summary())) + }) + + It("should report failure", func() { + Ω(success).Should(BeFalse()) + Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse()) + Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(0)) + }) + + It("should dump the writer", func() { + Ω(writer.EventStream).Should(ContainElement("DUMP")) + }) + }) + }) + + Describe("When instructed to fail fast", func() { + BeforeEach(func() { + conf := config.GinkgoConfigType{ + FailFast: true, + } + runner = newRunner(conf, nil, newAftSuite("after-suite", false), newSpec("passing", noneFlag, false), newSpec("failing", noneFlag, true), newSpec("dont-see", noneFlag, true), newSpec("dont-see", noneFlag, true)) + }) + + It("should return false, report failure, and not run anything past the failing test", func() { + Ω(runner.Run()).Should(BeFalse()) + Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse()) + Ω(thingsThatRan).Should(Equal([]string{"passing", "failing", "after-suite"})) + }) + + It("should announce the subsequent specs as skipped", func() { + runner.Run() + Ω(reporter1.SpecSummaries).Should(HaveLen(4)) + Ω(reporter1.SpecSummaries[2].State).Should(Equal(types.SpecStateSkipped)) + Ω(reporter1.SpecSummaries[3].State).Should(Equal(types.SpecStateSkipped)) + }) + + It("should mark all subsequent specs as skipped", func() { + runner.Run() + Ω(reporter1.EndSummary.NumberOfSkippedSpecs).Should(Equal(2)) + }) + }) + + Describe("Marking failure and success", func() { + Context("when all tests pass", func() { + BeforeEach(func() { + runner = newRunner(config.GinkgoConfigType{}, nil, nil, newSpec("passing", noneFlag, false), newSpec("pending", pendingFlag, false)) + }) + + It("should return true and report success", func() { + Ω(runner.Run()).Should(BeTrue()) + Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeTrue()) + }) + }) + + Context("when a test fails", func() { + BeforeEach(func() { + runner = newRunner(config.GinkgoConfigType{}, nil, nil, newSpec("failing", noneFlag, true), newSpec("pending", pendingFlag, false)) + }) + + It("should return false and report failure", func() { + Ω(runner.Run()).Should(BeFalse()) + Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse()) + }) + }) + + Context("when there is a pending test, but pendings count as failures", func() { + BeforeEach(func() { + runner = newRunner(config.GinkgoConfigType{FailOnPending: true}, nil, nil, newSpec("passing", noneFlag, false), newSpec("pending", pendingFlag, false)) + }) + + It("should return false 
and report failure", func() { + Ω(runner.Run()).Should(BeFalse()) + Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse()) + }) + }) + }) + + Describe("Managing the writer", func() { + BeforeEach(func() { + runner = newRunner( + config.GinkgoConfigType{}, + nil, + nil, + newSpec("A", noneFlag, false), + newSpec("B", noneFlag, true), + newSpec("C", noneFlag, false), + ) + reporter1.SpecWillRunStub = func(specSummary *types.SpecSummary) { + writer.AddEvent("R1.WillRun") + } + reporter2.SpecWillRunStub = func(specSummary *types.SpecSummary) { + writer.AddEvent("R2.WillRun") + } + reporter1.SpecDidCompleteStub = func(specSummary *types.SpecSummary) { + writer.AddEvent("R1.DidComplete") + } + reporter2.SpecDidCompleteStub = func(specSummary *types.SpecSummary) { + writer.AddEvent("R2.DidComplete") + } + runner.Run() + }) + + It("should truncate between tests, but only dump if a test fails", func() { + Ω(writer.EventStream).Should(Equal([]string{ + "TRUNCATE", + "R1.WillRun", + "R2.WillRun", + "A", + "R2.DidComplete", + "R1.DidComplete", + "TRUNCATE", + "R1.WillRun", + "R2.WillRun", + "B", + "BYTES", + "R2.DidComplete", + "DUMP", + "R1.DidComplete", + "TRUNCATE", + "R1.WillRun", + "R2.WillRun", + "C", + "R2.DidComplete", + "R1.DidComplete", + })) + }) + }) + + Describe("CurrentSpecSummary", func() { + It("should return the spec summary for the currently running spec", func() { + var summary *types.SpecSummary + runner = newRunner( + config.GinkgoConfigType{}, + nil, + nil, + newSpec("A", noneFlag, false), + newSpecWithBody("B", func() { + var ok bool + summary, ok = runner.CurrentSpecSummary() + Ω(ok).Should(BeTrue()) + }), + newSpec("C", noneFlag, false), + ) + runner.Run() + + Ω(summary.ComponentTexts).Should(Equal([]string{"B"})) + + summary, ok := runner.CurrentSpecSummary() + Ω(summary).Should(BeNil()) + Ω(ok).Should(BeFalse()) + }) + }) + + Describe("generating a suite id", func() { + It("should generate an id randomly", func() { + runnerA := newRunner(config.GinkgoConfigType{}, nil, nil) + runnerA.Run() + IDA := reporter1.BeginSummary.SuiteID + + runnerB := newRunner(config.GinkgoConfigType{}, nil, nil) + runnerB.Run() + IDB := reporter1.BeginSummary.SuiteID + + IDRegexp := "[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}" + Ω(IDA).Should(MatchRegexp(IDRegexp)) + Ω(IDB).Should(MatchRegexp(IDRegexp)) + + Ω(IDA).ShouldNot(Equal(IDB)) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go new file mode 100644 index 00000000000..698a6e56898 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go @@ -0,0 +1,183 @@ +package suite + +import ( + "math/rand" + "net/http" + "time" + + "github.com/onsi/ginkgo/internal/spec_iterator" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/containernode" + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/internal/spec" + "github.com/onsi/ginkgo/internal/specrunner" + "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" +) + +type ginkgoTestingT interface { + Fail() +} + +type Suite struct { + topLevelContainer *containernode.ContainerNode + currentContainer *containernode.ContainerNode + containerIndex int + beforeSuiteNode leafnodes.SuiteNode + afterSuiteNode leafnodes.SuiteNode + runner *specrunner.SpecRunner + failer *failer.Failer + running bool +} + +func New(failer *failer.Failer) *Suite { + topLevelContainer 
:= containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{}) + + return &Suite{ + topLevelContainer: topLevelContainer, + currentContainer: topLevelContainer, + failer: failer, + containerIndex: 1, + } +} + +func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) { + if config.ParallelTotal < 1 { + panic("ginkgo.parallel.total must be >= 1") + } + + if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 { + panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total") + } + + r := rand.New(rand.NewSource(config.RandomSeed)) + suite.topLevelContainer.Shuffle(r) + iterator, hasProgrammaticFocus := suite.generateSpecsIterator(description, config) + suite.runner = specrunner.New(description, suite.beforeSuiteNode, iterator, suite.afterSuiteNode, reporters, writer, config) + + suite.running = true + success := suite.runner.Run() + if !success { + t.Fail() + } + return success, hasProgrammaticFocus +} + +func (suite *Suite) generateSpecsIterator(description string, config config.GinkgoConfigType) (spec_iterator.SpecIterator, bool) { + specsSlice := []*spec.Spec{} + suite.topLevelContainer.BackPropagateProgrammaticFocus() + for _, collatedNodes := range suite.topLevelContainer.Collate() { + specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress)) + } + + specs := spec.NewSpecs(specsSlice) + specs.RegexScansFilePath = config.RegexScansFilePath + + if config.RandomizeAllSpecs { + specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed))) + } + + specs.ApplyFocus(description, config.FocusString, config.SkipString) + + if config.SkipMeasurements { + specs.SkipMeasurements() + } + + var iterator spec_iterator.SpecIterator + + if config.ParallelTotal > 1 { + iterator = spec_iterator.NewParallelIterator(specs.Specs(), config.SyncHost) + resp, err := http.Get(config.SyncHost + "/has-counter") + if err != nil || resp.StatusCode != http.StatusOK { + iterator = spec_iterator.NewShardedParallelIterator(specs.Specs(), config.ParallelTotal, config.ParallelNode) + } + } else { + iterator = spec_iterator.NewSerialIterator(specs.Specs()) + } + + return iterator, specs.HasProgrammaticFocus() +} + +func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) { + return suite.runner.CurrentSpecSummary() +} + +func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.beforeSuiteNode != nil { + panic("You may only call BeforeSuite once!") + } + suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.afterSuiteNode != nil { + panic("You may only call AfterSuite once!") + } + suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.beforeSuiteNode != nil { + panic("You may only call BeforeSuite once!") + } + suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, 
timeout time.Duration) { + if suite.afterSuiteNode != nil { + panic("You may only call AfterSuite once!") + } + suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) { + container := containernode.New(text, flag, codeLocation) + suite.currentContainer.PushContainerNode(container) + + previousContainer := suite.currentContainer + suite.currentContainer = container + suite.containerIndex++ + + body() + + suite.containerIndex-- + suite.currentContainer = previousContainer +} + +func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call It from within a Describe or Context", codeLocation) + } + suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) { + if suite.running { + suite.failer.Fail("You may only call Measure from within a Describe or Context", codeLocation) + } + suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call BeforeEach from within a Describe or Context", codeLocation) + } + suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call JustBeforeEach from within a Describe or Context", codeLocation) + } + suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call AfterEach from within a Describe or Context", codeLocation) + } + suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite_suite_test.go new file mode 100644 index 00000000000..06fe1d12aba --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite_suite_test.go @@ -0,0 +1,35 @@ +package suite_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "testing" +) + +func Test(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Suite") +} + +var numBeforeSuiteRuns = 0 +var numAfterSuiteRuns = 0 + +var _ = BeforeSuite(func() { + numBeforeSuiteRuns++ +}) + +var _ = AfterSuite(func() { + numAfterSuiteRuns++ + Ω(numBeforeSuiteRuns).Should(Equal(1)) + Ω(numAfterSuiteRuns).Should(Equal(1)) +}) + +//Fakes +type fakeTestingT struct { + didFail bool +} + +func (fakeT *fakeTestingT) Fail() { + fakeT.didFail = true +} diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite_test.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite_test.go new file mode 100644 index 00000000000..b7bcdbd2e1e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite_test.go @@ -0,0 +1,371 @@ +package suite_test + +import ( + "bytes" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/internal/suite" + . "github.com/onsi/gomega" + + "math/rand" + "time" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/codelocation" + Failer "github.com/onsi/ginkgo/internal/failer" + Writer "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" +) + +var _ = Describe("Suite", func() { + var ( + specSuite *Suite + fakeT *fakeTestingT + fakeR *reporters.FakeReporter + writer *Writer.FakeGinkgoWriter + failer *Failer.Failer + ) + + BeforeEach(func() { + writer = Writer.NewFake() + fakeT = &fakeTestingT{} + fakeR = reporters.NewFakeReporter() + failer = Failer.New() + specSuite = New(failer) + }) + + Describe("running a suite", func() { + var ( + runOrder []string + randomizeAllSpecs bool + randomSeed int64 + focusString string + parallelNode int + parallelTotal int + runResult bool + hasProgrammaticFocus bool + ) + + var f = func(runText string) func() { + return func() { + runOrder = append(runOrder, runText) + } + } + + BeforeEach(func() { + randomizeAllSpecs = false + randomSeed = 11 + parallelNode = 1 + parallelTotal = 1 + focusString = "" + + runOrder = make([]string, 0) + specSuite.SetBeforeSuiteNode(f("BeforeSuite"), codelocation.New(0), 0) + specSuite.PushBeforeEachNode(f("top BE"), codelocation.New(0), 0) + specSuite.PushJustBeforeEachNode(f("top JBE"), codelocation.New(0), 0) + specSuite.PushAfterEachNode(f("top AE"), codelocation.New(0), 0) + + specSuite.PushContainerNode("container", func() { + specSuite.PushBeforeEachNode(f("BE"), codelocation.New(0), 0) + specSuite.PushJustBeforeEachNode(f("JBE"), codelocation.New(0), 0) + specSuite.PushAfterEachNode(f("AE"), codelocation.New(0), 0) + specSuite.PushItNode("it", f("IT"), types.FlagTypeNone, codelocation.New(0), 0) + + specSuite.PushContainerNode("inner container", func() { + specSuite.PushItNode("inner it", f("inner IT"), types.FlagTypeNone, codelocation.New(0), 0) + }, types.FlagTypeNone, codelocation.New(0)) + }, types.FlagTypeNone, codelocation.New(0)) + + specSuite.PushContainerNode("container 2", func() { + specSuite.PushBeforeEachNode(f("BE 2"), codelocation.New(0), 0) + specSuite.PushItNode("it 2", f("IT 2"), types.FlagTypeNone, codelocation.New(0), 0) + }, types.FlagTypeNone, codelocation.New(0)) + + specSuite.PushItNode("top level it", f("top IT"), types.FlagTypeNone, codelocation.New(0), 0) + + specSuite.SetAfterSuiteNode(f("AfterSuite"), codelocation.New(0), 0) + }) + + JustBeforeEach(func() { + runResult, hasProgrammaticFocus = specSuite.Run(fakeT, "suite description", []reporters.Reporter{fakeR}, writer, config.GinkgoConfigType{ + RandomSeed: randomSeed, + 
RandomizeAllSpecs: randomizeAllSpecs, + FocusString: focusString, + ParallelNode: parallelNode, + ParallelTotal: parallelTotal, + }) + }) + + It("provides the config and suite description to the reporter", func() { + Ω(fakeR.Config.RandomSeed).Should(Equal(int64(randomSeed))) + Ω(fakeR.Config.RandomizeAllSpecs).Should(Equal(randomizeAllSpecs)) + Ω(fakeR.BeginSummary.SuiteDescription).Should(Equal("suite description")) + }) + + It("reports that the BeforeSuite node ran", func() { + Ω(fakeR.BeforeSuiteSummary).ShouldNot(BeNil()) + }) + + It("reports that the AfterSuite node ran", func() { + Ω(fakeR.AfterSuiteSummary).ShouldNot(BeNil()) + }) + + It("provides information about the current test", func() { + description := CurrentGinkgoTestDescription() + Ω(description.ComponentTexts).Should(Equal([]string{"Suite", "running a suite", "provides information about the current test"})) + Ω(description.FullTestText).Should(Equal("Suite running a suite provides information about the current test")) + Ω(description.TestText).Should(Equal("provides information about the current test")) + Ω(description.IsMeasurement).Should(BeFalse()) + Ω(description.FileName).Should(ContainSubstring("suite_test.go")) + Ω(description.LineNumber).Should(BeNumerically(">", 50)) + Ω(description.LineNumber).Should(BeNumerically("<", 150)) + Ω(description.Failed).Should(BeFalse()) + }) + + Measure("should run measurements", func(b Benchmarker) { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + + runtime := b.Time("sleeping", func() { + sleepTime := time.Duration(r.Float64() * 0.01 * float64(time.Second)) + time.Sleep(sleepTime) + }) + Ω(runtime.Seconds()).Should(BeNumerically("<=", 1)) + Ω(runtime.Seconds()).Should(BeNumerically(">=", 0)) + + randomValue := r.Float64() * 10.0 + b.RecordValue("random value", randomValue) + Ω(randomValue).Should(BeNumerically("<=", 10.0)) + Ω(randomValue).Should(BeNumerically(">=", 0.0)) + + b.RecordValueWithPrecision("specific value", 123.4567, "ms", 2) + b.RecordValueWithPrecision("specific value", 234.5678, "ms", 2) + }, 10) + + It("creates a node hierarchy, converts it to a spec collection, and runs it", func() { + Ω(runOrder).Should(Equal([]string{ + "BeforeSuite", + "top BE", "BE", "top JBE", "JBE", "IT", "AE", "top AE", + "top BE", "BE", "top JBE", "JBE", "inner IT", "AE", "top AE", + "top BE", "BE 2", "top JBE", "IT 2", "top AE", + "top BE", "top JBE", "top IT", "top AE", + "AfterSuite", + })) + }) + + Context("when told to randomize all specs", func() { + BeforeEach(func() { + randomizeAllSpecs = true + }) + + It("does", func() { + Ω(runOrder).Should(Equal([]string{ + "BeforeSuite", + "top BE", "top JBE", "top IT", "top AE", + "top BE", "BE", "top JBE", "JBE", "inner IT", "AE", "top AE", + "top BE", "BE", "top JBE", "JBE", "IT", "AE", "top AE", + "top BE", "BE 2", "top JBE", "IT 2", "top AE", + "AfterSuite", + })) + }) + }) + + Context("when provided with a filter", func() { + BeforeEach(func() { + focusString = `inner|\d` + }) + + It("converts the filter to a regular expression and uses it to filter the running specs", func() { + Ω(runOrder).Should(Equal([]string{ + "BeforeSuite", + "top BE", "BE", "top JBE", "JBE", "inner IT", "AE", "top AE", + "top BE", "BE 2", "top JBE", "IT 2", "top AE", + "AfterSuite", + })) + }) + + It("should not report a programmatic focus", func() { + Ω(hasProgrammaticFocus).Should(BeFalse()) + }) + }) + + Context("with a programatically focused spec", func() { + BeforeEach(func() { + specSuite.PushItNode("focused it", f("focused it"), 
types.FlagTypeFocused, codelocation.New(0), 0) + + specSuite.PushContainerNode("focused container", func() { + specSuite.PushItNode("inner focused it", f("inner focused it"), types.FlagTypeFocused, codelocation.New(0), 0) + specSuite.PushItNode("inner unfocused it", f("inner unfocused it"), types.FlagTypeNone, codelocation.New(0), 0) + }, types.FlagTypeFocused, codelocation.New(0)) + + }) + + It("should only run the focused test, applying backpropagation to favor most deeply focused leaf nodes", func() { + Ω(runOrder).Should(Equal([]string{ + "BeforeSuite", + "top BE", "top JBE", "focused it", "top AE", + "top BE", "top JBE", "inner focused it", "top AE", + "AfterSuite", + })) + }) + + It("should report a programmatic focus", func() { + Ω(hasProgrammaticFocus).Should(BeTrue()) + }) + }) + + Context("when the specs pass", func() { + It("doesn't report a failure", func() { + Ω(fakeT.didFail).Should(BeFalse()) + }) + + It("should return true", func() { + Ω(runResult).Should(BeTrue()) + }) + }) + + Context("when a spec fails", func() { + var location types.CodeLocation + BeforeEach(func() { + specSuite.PushItNode("top level it", func() { + location = codelocation.New(0) + failer.Fail("oops!", location) + }, types.FlagTypeNone, codelocation.New(0), 0) + }) + + It("should return false", func() { + Ω(runResult).Should(BeFalse()) + }) + + It("reports a failure", func() { + Ω(fakeT.didFail).Should(BeTrue()) + }) + + It("generates the correct failure data", func() { + Ω(fakeR.SpecSummaries[0].Failure.Message).Should(Equal("oops!")) + Ω(fakeR.SpecSummaries[0].Failure.Location).Should(Equal(location)) + }) + }) + + Context("when runnable nodes are nested within other runnable nodes", func() { + Context("when an It is nested", func() { + BeforeEach(func() { + specSuite.PushItNode("top level it", func() { + specSuite.PushItNode("nested it", f("oops"), types.FlagTypeNone, codelocation.New(0), 0) + }, types.FlagTypeNone, codelocation.New(0), 0) + }) + + It("should fail", func() { + Ω(fakeT.didFail).Should(BeTrue()) + }) + }) + + Context("when a Measure is nested", func() { + BeforeEach(func() { + specSuite.PushItNode("top level it", func() { + specSuite.PushMeasureNode("nested measure", func(Benchmarker) {}, types.FlagTypeNone, codelocation.New(0), 10) + }, types.FlagTypeNone, codelocation.New(0), 0) + }) + + It("should fail", func() { + Ω(fakeT.didFail).Should(BeTrue()) + }) + }) + + Context("when a BeforeEach is nested", func() { + BeforeEach(func() { + specSuite.PushItNode("top level it", func() { + specSuite.PushBeforeEachNode(f("nested bef"), codelocation.New(0), 0) + }, types.FlagTypeNone, codelocation.New(0), 0) + }) + + It("should fail", func() { + Ω(fakeT.didFail).Should(BeTrue()) + }) + }) + + Context("when a JustBeforeEach is nested", func() { + BeforeEach(func() { + specSuite.PushItNode("top level it", func() { + specSuite.PushJustBeforeEachNode(f("nested jbef"), codelocation.New(0), 0) + }, types.FlagTypeNone, codelocation.New(0), 0) + }) + + It("should fail", func() { + Ω(fakeT.didFail).Should(BeTrue()) + }) + }) + + Context("when a AfterEach is nested", func() { + BeforeEach(func() { + specSuite.PushItNode("top level it", func() { + specSuite.PushAfterEachNode(f("nested aft"), codelocation.New(0), 0) + }, types.FlagTypeNone, codelocation.New(0), 0) + }) + + It("should fail", func() { + Ω(fakeT.didFail).Should(BeTrue()) + }) + }) + }) + }) + + Describe("BeforeSuite", func() { + Context("when setting BeforeSuite more than once", func() { + It("should panic", func() { + 
specSuite.SetBeforeSuiteNode(func() {}, codelocation.New(0), 0) + + Ω(func() { + specSuite.SetBeforeSuiteNode(func() {}, codelocation.New(0), 0) + }).Should(Panic()) + + }) + }) + }) + + Describe("AfterSuite", func() { + Context("when setting AfterSuite more than once", func() { + It("should panic", func() { + specSuite.SetAfterSuiteNode(func() {}, codelocation.New(0), 0) + + Ω(func() { + specSuite.SetAfterSuiteNode(func() {}, codelocation.New(0), 0) + }).Should(Panic()) + }) + }) + }) + + Describe("By", func() { + It("writes to the GinkgoWriter", func() { + originalGinkgoWriter := GinkgoWriter + buffer := &bytes.Buffer{} + + GinkgoWriter = buffer + By("Saying Hello GinkgoWriter") + GinkgoWriter = originalGinkgoWriter + + Ω(buffer.String()).Should(ContainSubstring("STEP")) + Ω(buffer.String()).Should(ContainSubstring(": Saying Hello GinkgoWriter\n")) + }) + + It("calls the passed-in callback if present", func() { + a := 0 + By("calling the callback", func() { + a = 1 + }) + Ω(a).Should(Equal(1)) + }) + + It("panics if there is more than one callback", func() { + Ω(func() { + By("registering more than one callback", func() {}, func() {}) + }).Should(Panic()) + }) + }) + + Describe("GinkgoRandomSeed", func() { + It("returns the current config's random seed", func() { + Ω(GinkgoRandomSeed()).Should(Equal(config.GinkgoConfig.RandomSeed)) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go new file mode 100644 index 00000000000..090445d084b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go @@ -0,0 +1,76 @@ +package testingtproxy + +import ( + "fmt" + "io" +) + +type failFunc func(message string, callerSkip ...int) + +func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy { + return &ginkgoTestingTProxy{ + fail: fail, + offset: offset, + writer: writer, + } +} + +type ginkgoTestingTProxy struct { + fail failFunc + offset int + writer io.Writer +} + +func (t *ginkgoTestingTProxy) Error(args ...interface{}) { + t.fail(fmt.Sprintln(args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) { + t.fail(fmt.Sprintf(format, args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Fail() { + t.fail("failed", t.offset) +} + +func (t *ginkgoTestingTProxy) FailNow() { + t.fail("failed", t.offset) +} + +func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) { + t.fail(fmt.Sprintln(args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) { + t.fail(fmt.Sprintf(format, args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Log(args ...interface{}) { + fmt.Fprintln(t.writer, args...) +} + +func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) { + t.Log(fmt.Sprintf(format, args...)) +} + +func (t *ginkgoTestingTProxy) Failed() bool { + return false +} + +func (t *ginkgoTestingTProxy) Parallel() { +} + +func (t *ginkgoTestingTProxy) Skip(args ...interface{}) { + fmt.Println(args...) 
+} + +func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) { + t.Skip(fmt.Sprintf(format, args...)) +} + +func (t *ginkgoTestingTProxy) SkipNow() { +} + +func (t *ginkgoTestingTProxy) Skipped() bool { + return false +} diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go new file mode 100644 index 00000000000..6739c3f605e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go @@ -0,0 +1,36 @@ +package writer + +type FakeGinkgoWriter struct { + EventStream []string +} + +func NewFake() *FakeGinkgoWriter { + return &FakeGinkgoWriter{ + EventStream: []string{}, + } +} + +func (writer *FakeGinkgoWriter) AddEvent(event string) { + writer.EventStream = append(writer.EventStream, event) +} + +func (writer *FakeGinkgoWriter) Truncate() { + writer.EventStream = append(writer.EventStream, "TRUNCATE") +} + +func (writer *FakeGinkgoWriter) DumpOut() { + writer.EventStream = append(writer.EventStream, "DUMP") +} + +func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) { + writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header) +} + +func (writer *FakeGinkgoWriter) Bytes() []byte { + writer.EventStream = append(writer.EventStream, "BYTES") + return nil +} + +func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) { + return 0, nil +} diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go new file mode 100644 index 00000000000..6b23b1a6487 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go @@ -0,0 +1,81 @@ +package writer + +import ( + "bytes" + "io" + "sync" +) + +type WriterInterface interface { + io.Writer + + Truncate() + DumpOut() + DumpOutWithHeader(header string) + Bytes() []byte +} + +type Writer struct { + buffer *bytes.Buffer + outWriter io.Writer + lock *sync.Mutex + stream bool +} + +func New(outWriter io.Writer) *Writer { + return &Writer{ + buffer: &bytes.Buffer{}, + lock: &sync.Mutex{}, + outWriter: outWriter, + stream: true, + } +} + +func (w *Writer) SetStream(stream bool) { + w.lock.Lock() + defer w.lock.Unlock() + w.stream = stream +} + +func (w *Writer) Write(b []byte) (n int, err error) { + w.lock.Lock() + defer w.lock.Unlock() + + n, err = w.buffer.Write(b) + if w.stream { + return w.outWriter.Write(b) + } + return n, err +} + +func (w *Writer) Truncate() { + w.lock.Lock() + defer w.lock.Unlock() + w.buffer.Reset() +} + +func (w *Writer) DumpOut() { + w.lock.Lock() + defer w.lock.Unlock() + if !w.stream { + w.buffer.WriteTo(w.outWriter) + } +} + +func (w *Writer) Bytes() []byte { + w.lock.Lock() + defer w.lock.Unlock() + b := w.buffer.Bytes() + copied := make([]byte, len(b)) + copy(copied, b) + return copied +} + +func (w *Writer) DumpOutWithHeader(header string) { + w.lock.Lock() + defer w.lock.Unlock() + if !w.stream && w.buffer.Len() > 0 { + w.outWriter.Write([]byte(header)) + w.buffer.WriteTo(w.outWriter) + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/writer_suite_test.go b/vendor/github.com/onsi/ginkgo/internal/writer/writer_suite_test.go new file mode 100644 index 00000000000..e206577919a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/writer/writer_suite_test.go @@ -0,0 +1,13 @@ +package writer_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "testing" +) + +func TestWriter(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Writer Suite") +} diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/writer_test.go b/vendor/github.com/onsi/ginkgo/internal/writer/writer_test.go new file mode 100644 index 00000000000..3e1d17c6d50 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/writer/writer_test.go @@ -0,0 +1,75 @@ +package writer_test + +import ( + "github.com/onsi/gomega/gbytes" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/internal/writer" + . "github.com/onsi/gomega" +) + +var _ = Describe("Writer", func() { + var writer *Writer + var out *gbytes.Buffer + + BeforeEach(func() { + out = gbytes.NewBuffer() + writer = New(out) + }) + + It("should stream directly to the outbuffer by default", func() { + writer.Write([]byte("foo")) + Ω(out).Should(gbytes.Say("foo")) + }) + + It("should not emit the header when asked to DumpOutWitHeader", func() { + writer.Write([]byte("foo")) + writer.DumpOutWithHeader("my header") + Ω(out).ShouldNot(gbytes.Say("my header")) + Ω(out).Should(gbytes.Say("foo")) + }) + + Context("when told not to stream", func() { + BeforeEach(func() { + writer.SetStream(false) + }) + + It("should only write to the buffer when told to DumpOut", func() { + writer.Write([]byte("foo")) + Ω(out).ShouldNot(gbytes.Say("foo")) + writer.DumpOut() + Ω(out).Should(gbytes.Say("foo")) + }) + + It("should truncate the internal buffer when told to truncate", func() { + writer.Write([]byte("foo")) + writer.Truncate() + writer.DumpOut() + Ω(out).ShouldNot(gbytes.Say("foo")) + + writer.Write([]byte("bar")) + writer.DumpOut() + Ω(out).Should(gbytes.Say("bar")) + }) + + Describe("emitting a header", func() { + Context("when the buffer has content", func() { + It("should emit the header followed by the content", func() { + writer.Write([]byte("foo")) + writer.DumpOutWithHeader("my header") + + Ω(out).Should(gbytes.Say("my header")) + Ω(out).Should(gbytes.Say("foo")) + }) + }) + + Context("when the buffer has no content", func() { + It("should not emit the header", func() { + writer.DumpOutWithHeader("my header") + + Ω(out).ShouldNot(gbytes.Say("my header")) + }) + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go new file mode 100644 index 00000000000..fb82f70a6d8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go @@ -0,0 +1,84 @@ +/* +Ginkgo's Default Reporter + +A number of command line flags are available to tweak Ginkgo's default output. 
+ +These are documented [here](http://onsi.github.io/ginkgo/#running_tests) +*/ +package reporters + +import ( + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters/stenographer" + "github.com/onsi/ginkgo/types" +) + +type DefaultReporter struct { + config config.DefaultReporterConfigType + stenographer stenographer.Stenographer + specSummaries []*types.SpecSummary +} + +func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter { + return &DefaultReporter{ + config: config, + stenographer: stenographer, + } +} + +func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct) + if config.ParallelTotal > 1 { + reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, reporter.config.Succinct) + } else { + reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct) + } +} + +func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace) + } +} + +func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace) + } +} + +func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) { + if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped { + reporter.stenographer.AnnounceSpecWillRun(specSummary) + } +} + +func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) { + switch specSummary.State { + case types.SpecStatePassed: + if specSummary.IsMeasurement { + reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct) + } else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold { + reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct) + } else { + reporter.stenographer.AnnounceSuccesfulSpec(specSummary) + } + case types.SpecStatePending: + reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct) + case types.SpecStateSkipped: + reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct, reporter.config.FullTrace) + case types.SpecStateTimedOut: + reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace) + case types.SpecStatePanicked: + reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace) + case types.SpecStateFailed: + reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace) + } + + reporter.specSummaries = append(reporter.specSummaries, specSummary) +} + +func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + reporter.stenographer.SummarizeFailures(reporter.specSummaries) + reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct) +} diff --git 
a/vendor/github.com/onsi/ginkgo/reporters/default_reporter_test.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter_test.go new file mode 100644 index 00000000000..01528448eae --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/default_reporter_test.go @@ -0,0 +1,414 @@ +package reporters_test + +import ( + "time" + + . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters" + st "github.com/onsi/ginkgo/reporters/stenographer" + "github.com/onsi/ginkgo/types" + . "github.com/onsi/gomega" +) + +var _ = Describe("DefaultReporter", func() { + var ( + reporter *reporters.DefaultReporter + reporterConfig config.DefaultReporterConfigType + stenographer *st.FakeStenographer + + ginkgoConfig config.GinkgoConfigType + suite *types.SuiteSummary + spec *types.SpecSummary + ) + + BeforeEach(func() { + stenographer = st.NewFakeStenographer() + reporterConfig = config.DefaultReporterConfigType{ + NoColor: false, + SlowSpecThreshold: 0.1, + NoisyPendings: false, + Verbose: true, + FullTrace: true, + } + + reporter = reporters.NewDefaultReporter(reporterConfig, stenographer) + }) + + call := func(method string, args ...interface{}) st.FakeStenographerCall { + return st.NewFakeStenographerCall(method, args...) + } + + Describe("SpecSuiteWillBegin", func() { + BeforeEach(func() { + suite = &types.SuiteSummary{ + SuiteDescription: "A Sweet Suite", + NumberOfTotalSpecs: 10, + NumberOfSpecsThatWillBeRun: 8, + } + + ginkgoConfig = config.GinkgoConfigType{ + RandomSeed: 1138, + RandomizeAllSpecs: true, + } + }) + + Context("when a serial (non-parallel) suite begins", func() { + BeforeEach(func() { + ginkgoConfig.ParallelTotal = 1 + + reporter.SpecSuiteWillBegin(ginkgoConfig, suite) + }) + + It("should announce the suite, then announce the number of specs", func() { + Ω(stenographer.Calls()).Should(HaveLen(2)) + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuite", "A Sweet Suite", ginkgoConfig.RandomSeed, true, false))) + Ω(stenographer.Calls()[1]).Should(Equal(call("AnnounceNumberOfSpecs", 8, 10, false))) + }) + }) + + Context("when a parallel suite begins", func() { + BeforeEach(func() { + ginkgoConfig.ParallelTotal = 2 + ginkgoConfig.ParallelNode = 1 + suite.NumberOfSpecsBeforeParallelization = 20 + + reporter.SpecSuiteWillBegin(ginkgoConfig, suite) + }) + + It("should announce the suite, announce that it's a parallel run, then announce the number of specs", func() { + Ω(stenographer.Calls()).Should(HaveLen(2)) + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuite", "A Sweet Suite", ginkgoConfig.RandomSeed, true, false))) + Ω(stenographer.Calls()[1]).Should(Equal(call("AnnounceParallelRun", 1, 2, false))) + }) + }) + }) + + Describe("BeforeSuiteDidRun", func() { + Context("when the BeforeSuite passes", func() { + It("should announce nothing", func() { + reporter.BeforeSuiteDidRun(&types.SetupSummary{ + State: types.SpecStatePassed, + }) + + Ω(stenographer.Calls()).Should(BeEmpty()) + }) + }) + + Context("when the BeforeSuite fails", func() { + It("should announce the failure", func() { + summary := &types.SetupSummary{ + State: types.SpecStateFailed, + } + reporter.BeforeSuiteDidRun(summary) + + Ω(stenographer.Calls()).Should(HaveLen(1)) + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceBeforeSuiteFailure", summary, false, true))) + }) + }) + }) + + Describe("AfterSuiteDidRun", func() { + Context("when the AfterSuite passes", func() { + It("should announce nothing", func() { + reporter.AfterSuiteDidRun(&types.SetupSummary{ + 
State: types.SpecStatePassed, + }) + + Ω(stenographer.Calls()).Should(BeEmpty()) + }) + }) + + Context("when the AfterSuite fails", func() { + It("should announce the failure", func() { + summary := &types.SetupSummary{ + State: types.SpecStateFailed, + } + reporter.AfterSuiteDidRun(summary) + + Ω(stenographer.Calls()).Should(HaveLen(1)) + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceAfterSuiteFailure", summary, false, true))) + }) + }) + }) + + Describe("SpecWillRun", func() { + Context("When running in verbose mode", func() { + Context("and the spec will run", func() { + BeforeEach(func() { + spec = &types.SpecSummary{} + reporter.SpecWillRun(spec) + }) + + It("should announce that the spec will run", func() { + Ω(stenographer.Calls()).Should(HaveLen(1)) + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecWillRun", spec))) + }) + }) + + Context("and the spec will not run", func() { + Context("because it is pending", func() { + BeforeEach(func() { + spec = &types.SpecSummary{ + State: types.SpecStatePending, + } + reporter.SpecWillRun(spec) + }) + + It("should announce nothing", func() { + Ω(stenographer.Calls()).Should(BeEmpty()) + }) + }) + + Context("because it is skipped", func() { + BeforeEach(func() { + spec = &types.SpecSummary{ + State: types.SpecStateSkipped, + } + reporter.SpecWillRun(spec) + }) + + It("should announce nothing", func() { + Ω(stenographer.Calls()).Should(BeEmpty()) + }) + }) + }) + }) + + Context("When running in verbose & succinct mode", func() { + BeforeEach(func() { + reporterConfig.Succinct = true + reporter = reporters.NewDefaultReporter(reporterConfig, stenographer) + spec = &types.SpecSummary{} + reporter.SpecWillRun(spec) + }) + + It("should announce nothing", func() { + Ω(stenographer.Calls()).Should(BeEmpty()) + }) + }) + + Context("When not running in verbose mode", func() { + BeforeEach(func() { + reporterConfig.Verbose = false + reporter = reporters.NewDefaultReporter(reporterConfig, stenographer) + spec = &types.SpecSummary{} + reporter.SpecWillRun(spec) + }) + + It("should announce nothing", func() { + Ω(stenographer.Calls()).Should(BeEmpty()) + }) + }) + }) + + Describe("SpecDidComplete", func() { + JustBeforeEach(func() { + reporter.SpecDidComplete(spec) + }) + + BeforeEach(func() { + spec = &types.SpecSummary{} + }) + + Context("When the spec passed", func() { + BeforeEach(func() { + spec.State = types.SpecStatePassed + }) + + Context("When the spec was a measurement", func() { + BeforeEach(func() { + spec.IsMeasurement = true + }) + + It("should announce the measurement", func() { + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuccesfulMeasurement", spec, false))) + }) + }) + + Context("When the spec is slow", func() { + BeforeEach(func() { + spec.RunTime = time.Second + }) + + It("should announce that it was slow", func() { + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuccesfulSlowSpec", spec, false))) + }) + }) + + Context("Otherwise", func() { + It("should announce the succesful spec", func() { + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuccesfulSpec", spec))) + }) + }) + }) + + Context("When the spec is pending", func() { + BeforeEach(func() { + spec.State = types.SpecStatePending + }) + + It("should announce the pending spec, succinctly", func() { + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnouncePendingSpec", spec, false))) + }) + }) + + Context("When the spec is skipped", func() { + BeforeEach(func() { + spec.State = types.SpecStateSkipped + }) + + It("should announce the skipped 
spec", func() { + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSkippedSpec", spec, false, true))) + }) + }) + + Context("When the spec timed out", func() { + BeforeEach(func() { + spec.State = types.SpecStateTimedOut + }) + + It("should announce the timedout spec", func() { + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecTimedOut", spec, false, true))) + }) + }) + + Context("When the spec panicked", func() { + BeforeEach(func() { + spec.State = types.SpecStatePanicked + }) + + It("should announce the panicked spec", func() { + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecPanicked", spec, false, true))) + }) + }) + + Context("When the spec failed", func() { + BeforeEach(func() { + spec.State = types.SpecStateFailed + }) + + It("should announce the failed spec", func() { + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecFailed", spec, false, true))) + }) + }) + + Context("in noisy pendings mode", func() { + BeforeEach(func() { + reporterConfig.Succinct = false + reporterConfig.NoisyPendings = true + reporter = reporters.NewDefaultReporter(reporterConfig, stenographer) + }) + + Context("When the spec is pending", func() { + BeforeEach(func() { + spec.State = types.SpecStatePending + }) + + It("should announce the pending spec, noisily", func() { + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnouncePendingSpec", spec, true))) + }) + }) + }) + + Context("in succinct mode", func() { + BeforeEach(func() { + reporterConfig.Succinct = true + reporter = reporters.NewDefaultReporter(reporterConfig, stenographer) + }) + + Context("When the spec passed", func() { + BeforeEach(func() { + spec.State = types.SpecStatePassed + }) + + Context("When the spec was a measurement", func() { + BeforeEach(func() { + spec.IsMeasurement = true + }) + + It("should announce the measurement", func() { + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuccesfulMeasurement", spec, true))) + }) + }) + + Context("When the spec is slow", func() { + BeforeEach(func() { + spec.RunTime = time.Second + }) + + It("should announce that it was slow", func() { + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuccesfulSlowSpec", spec, true))) + }) + }) + + Context("Otherwise", func() { + It("should announce the succesful spec", func() { + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuccesfulSpec", spec))) + }) + }) + }) + + Context("When the spec is pending", func() { + BeforeEach(func() { + spec.State = types.SpecStatePending + }) + + It("should announce the pending spec, succinctly", func() { + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnouncePendingSpec", spec, false))) + }) + }) + + Context("When the spec is skipped", func() { + BeforeEach(func() { + spec.State = types.SpecStateSkipped + }) + + It("should announce the skipped spec", func() { + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSkippedSpec", spec, true, true))) + }) + }) + + Context("When the spec timed out", func() { + BeforeEach(func() { + spec.State = types.SpecStateTimedOut + }) + + It("should announce the timedout spec", func() { + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecTimedOut", spec, true, true))) + }) + }) + + Context("When the spec panicked", func() { + BeforeEach(func() { + spec.State = types.SpecStatePanicked + }) + + It("should announce the panicked spec", func() { + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecPanicked", spec, true, true))) + }) + }) + + Context("When the spec failed", func() { + BeforeEach(func() { + spec.State = 
types.SpecStateFailed + }) + + It("should announce the failed spec", func() { + Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecFailed", spec, true, true))) + }) + }) + }) + }) + + Describe("SpecSuiteDidEnd", func() { + BeforeEach(func() { + suite = &types.SuiteSummary{} + reporter.SpecSuiteDidEnd(suite) + }) + + It("should announce the spec run's completion", func() { + Ω(stenographer.Calls()[1]).Should(Equal(call("AnnounceSpecRunCompletion", suite, false))) + }) + }) +}) diff --git a/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go new file mode 100644 index 00000000000..27db4794908 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go @@ -0,0 +1,59 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +//FakeReporter is useful for testing purposes +type FakeReporter struct { + Config config.GinkgoConfigType + + BeginSummary *types.SuiteSummary + BeforeSuiteSummary *types.SetupSummary + SpecWillRunSummaries []*types.SpecSummary + SpecSummaries []*types.SpecSummary + AfterSuiteSummary *types.SetupSummary + EndSummary *types.SuiteSummary + + SpecWillRunStub func(specSummary *types.SpecSummary) + SpecDidCompleteStub func(specSummary *types.SpecSummary) +} + +func NewFakeReporter() *FakeReporter { + return &FakeReporter{ + SpecWillRunSummaries: make([]*types.SpecSummary, 0), + SpecSummaries: make([]*types.SpecSummary, 0), + } +} + +func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + fakeR.Config = config + fakeR.BeginSummary = summary +} + +func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + fakeR.BeforeSuiteSummary = setupSummary +} + +func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) { + if fakeR.SpecWillRunStub != nil { + fakeR.SpecWillRunStub(specSummary) + } + fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary) +} + +func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) { + if fakeR.SpecDidCompleteStub != nil { + fakeR.SpecDidCompleteStub(specSummary) + } + fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary) +} + +func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + fakeR.AfterSuiteSummary = setupSummary +} + +func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + fakeR.EndSummary = summary +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go new file mode 100644 index 00000000000..89b03513fd1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go @@ -0,0 +1,147 @@ +/* + +JUnit XML Reporter for Ginkgo + +For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output + +*/ + +package reporters + +import ( + "encoding/xml" + "fmt" + "os" + "strings" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +type JUnitTestSuite struct { + XMLName xml.Name `xml:"testsuite"` + TestCases []JUnitTestCase `xml:"testcase"` + Tests int `xml:"tests,attr"` + Failures int `xml:"failures,attr"` + Time float64 `xml:"time,attr"` +} + +type JUnitTestCase struct { + Name string `xml:"name,attr"` + ClassName string `xml:"classname,attr"` + FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"` + Skipped *JUnitSkipped `xml:"skipped,omitempty"` + Time float64 `xml:"time,attr"` + SystemOut string 
`xml:"system-out,omitempty"` +} + +type JUnitFailureMessage struct { + Type string `xml:"type,attr"` + Message string `xml:",chardata"` +} + +type JUnitSkipped struct { + XMLName xml.Name `xml:"skipped"` +} + +type JUnitReporter struct { + suite JUnitTestSuite + filename string + testSuiteName string +} + +//NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed in filename. +func NewJUnitReporter(filename string) *JUnitReporter { + return &JUnitReporter{ + filename: filename, + } +} + +func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + reporter.suite = JUnitTestSuite{ + TestCases: []JUnitTestCase{}, + } + reporter.testSuiteName = summary.SuiteDescription +} + +func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) { +} + +func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("BeforeSuite", setupSummary) +} + +func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("AfterSuite", setupSummary) +} + +func failureMessage(failure types.SpecFailure) string { + return fmt.Sprintf("%s\n%s\n%s", failure.ComponentCodeLocation.String(), failure.Message, failure.Location.String()) +} + +func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + testCase := JUnitTestCase{ + Name: name, + ClassName: reporter.testSuiteName, + } + + testCase.FailureMessage = &JUnitFailureMessage{ + Type: reporter.failureTypeForState(setupSummary.State), + Message: failureMessage(setupSummary.Failure), + } + testCase.SystemOut = setupSummary.CapturedOutput + testCase.Time = setupSummary.RunTime.Seconds() + reporter.suite.TestCases = append(reporter.suite.TestCases, testCase) + } +} + +func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) { + testCase := JUnitTestCase{ + Name: strings.Join(specSummary.ComponentTexts[1:], " "), + ClassName: reporter.testSuiteName, + } + if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { + testCase.FailureMessage = &JUnitFailureMessage{ + Type: reporter.failureTypeForState(specSummary.State), + Message: failureMessage(specSummary.Failure), + } + testCase.SystemOut = specSummary.CapturedOutput + } + if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { + testCase.Skipped = &JUnitSkipped{} + } + testCase.Time = specSummary.RunTime.Seconds() + reporter.suite.TestCases = append(reporter.suite.TestCases, testCase) +} + +func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun + reporter.suite.Time = summary.RunTime.Seconds() + reporter.suite.Failures = summary.NumberOfFailedSpecs + file, err := os.Create(reporter.filename) + if err != nil { + fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error()) + } + defer file.Close() + file.WriteString(xml.Header) + encoder := xml.NewEncoder(file) + encoder.Indent(" ", " ") + err = encoder.Encode(reporter.suite) + if err != nil { + fmt.Printf("Failed to generate JUnit report\n\t%s", err.Error()) + } +} + +func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string { + switch state { + case types.SpecStateFailed: + return "Failure" + case 
types.SpecStateTimedOut: + return "Timeout" + case types.SpecStatePanicked: + return "Panic" + default: + return "" + } +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter_test.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter_test.go new file mode 100644 index 00000000000..37a6daabdfb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter_test.go @@ -0,0 +1,247 @@ +package reporters_test + +import ( + "encoding/xml" + "io/ioutil" + "os" + "time" + + . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" + . "github.com/onsi/gomega" +) + +var _ = Describe("JUnit Reporter", func() { + var ( + outputFile string + reporter Reporter + ) + + readOutputFile := func() reporters.JUnitTestSuite { + bytes, err := ioutil.ReadFile(outputFile) + Ω(err).ShouldNot(HaveOccurred()) + var suite reporters.JUnitTestSuite + err = xml.Unmarshal(bytes, &suite) + Ω(err).ShouldNot(HaveOccurred()) + return suite + } + + BeforeEach(func() { + f, err := ioutil.TempFile("", "output") + Ω(err).ShouldNot(HaveOccurred()) + f.Close() + outputFile = f.Name() + + reporter = reporters.NewJUnitReporter(outputFile) + + reporter.SpecSuiteWillBegin(config.GinkgoConfigType{}, &types.SuiteSummary{ + SuiteDescription: "My test suite", + NumberOfSpecsThatWillBeRun: 1, + }) + }) + + AfterEach(func() { + os.RemoveAll(outputFile) + }) + + Describe("a passing test", func() { + BeforeEach(func() { + beforeSuite := &types.SetupSummary{ + State: types.SpecStatePassed, + } + reporter.BeforeSuiteDidRun(beforeSuite) + + afterSuite := &types.SetupSummary{ + State: types.SpecStatePassed, + } + reporter.AfterSuiteDidRun(afterSuite) + + spec := &types.SpecSummary{ + ComponentTexts: []string{"[Top Level]", "A", "B", "C"}, + State: types.SpecStatePassed, + RunTime: 5 * time.Second, + } + reporter.SpecWillRun(spec) + reporter.SpecDidComplete(spec) + + reporter.SpecSuiteDidEnd(&types.SuiteSummary{ + NumberOfSpecsThatWillBeRun: 1, + NumberOfFailedSpecs: 0, + RunTime: 10 * time.Second, + }) + }) + + It("should record the test as passing", func() { + output := readOutputFile() + Ω(output.Tests).Should(Equal(1)) + Ω(output.Failures).Should(Equal(0)) + Ω(output.Time).Should(Equal(10.0)) + Ω(output.TestCases).Should(HaveLen(1)) + Ω(output.TestCases[0].Name).Should(Equal("A B C")) + Ω(output.TestCases[0].ClassName).Should(Equal("My test suite")) + Ω(output.TestCases[0].FailureMessage).Should(BeNil()) + Ω(output.TestCases[0].Skipped).Should(BeNil()) + Ω(output.TestCases[0].Time).Should(Equal(5.0)) + }) + }) + + Describe("when the BeforeSuite fails", func() { + var beforeSuite *types.SetupSummary + + BeforeEach(func() { + beforeSuite = &types.SetupSummary{ + State: types.SpecStateFailed, + RunTime: 3 * time.Second, + Failure: types.SpecFailure{ + Message: "failed to setup", + ComponentCodeLocation: codelocation.New(0), + Location: codelocation.New(2), + }, + } + reporter.BeforeSuiteDidRun(beforeSuite) + + reporter.SpecSuiteDidEnd(&types.SuiteSummary{ + NumberOfSpecsThatWillBeRun: 1, + NumberOfFailedSpecs: 1, + RunTime: 10 * time.Second, + }) + }) + + It("should record the test as having failed", func() { + output := readOutputFile() + Ω(output.Tests).Should(Equal(1)) + Ω(output.Failures).Should(Equal(1)) + Ω(output.Time).Should(Equal(10.0)) + Ω(output.TestCases[0].Name).Should(Equal("BeforeSuite")) + Ω(output.TestCases[0].Time).Should(Equal(3.0)) + 
Ω(output.TestCases[0].ClassName).Should(Equal("My test suite")) + Ω(output.TestCases[0].FailureMessage.Type).Should(Equal("Failure")) + Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring("failed to setup")) + Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring(beforeSuite.Failure.ComponentCodeLocation.String())) + Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring(beforeSuite.Failure.Location.String())) + Ω(output.TestCases[0].Skipped).Should(BeNil()) + }) + }) + + Describe("when the AfterSuite fails", func() { + var afterSuite *types.SetupSummary + + BeforeEach(func() { + afterSuite = &types.SetupSummary{ + State: types.SpecStateFailed, + RunTime: 3 * time.Second, + Failure: types.SpecFailure{ + Message: "failed to setup", + ComponentCodeLocation: codelocation.New(0), + Location: codelocation.New(2), + }, + } + reporter.AfterSuiteDidRun(afterSuite) + + reporter.SpecSuiteDidEnd(&types.SuiteSummary{ + NumberOfSpecsThatWillBeRun: 1, + NumberOfFailedSpecs: 1, + RunTime: 10 * time.Second, + }) + }) + + It("should record the test as having failed", func() { + output := readOutputFile() + Ω(output.Tests).Should(Equal(1)) + Ω(output.Failures).Should(Equal(1)) + Ω(output.Time).Should(Equal(10.0)) + Ω(output.TestCases[0].Name).Should(Equal("AfterSuite")) + Ω(output.TestCases[0].Time).Should(Equal(3.0)) + Ω(output.TestCases[0].ClassName).Should(Equal("My test suite")) + Ω(output.TestCases[0].FailureMessage.Type).Should(Equal("Failure")) + Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring("failed to setup")) + Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring(afterSuite.Failure.ComponentCodeLocation.String())) + Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring(afterSuite.Failure.Location.String())) + Ω(output.TestCases[0].Skipped).Should(BeNil()) + }) + }) + + specStateCases := []struct { + state types.SpecState + message string + }{ + {types.SpecStateFailed, "Failure"}, + {types.SpecStateTimedOut, "Timeout"}, + {types.SpecStatePanicked, "Panic"}, + } + + for _, specStateCase := range specStateCases { + specStateCase := specStateCase + Describe("a failing test", func() { + var spec *types.SpecSummary + BeforeEach(func() { + spec = &types.SpecSummary{ + ComponentTexts: []string{"[Top Level]", "A", "B", "C"}, + State: specStateCase.state, + RunTime: 5 * time.Second, + Failure: types.SpecFailure{ + ComponentCodeLocation: codelocation.New(0), + Location: codelocation.New(2), + Message: "I failed", + }, + } + reporter.SpecWillRun(spec) + reporter.SpecDidComplete(spec) + + reporter.SpecSuiteDidEnd(&types.SuiteSummary{ + NumberOfSpecsThatWillBeRun: 1, + NumberOfFailedSpecs: 1, + RunTime: 10 * time.Second, + }) + }) + + It("should record test as failing", func() { + output := readOutputFile() + Ω(output.Tests).Should(Equal(1)) + Ω(output.Failures).Should(Equal(1)) + Ω(output.Time).Should(Equal(10.0)) + Ω(output.TestCases[0].Name).Should(Equal("A B C")) + Ω(output.TestCases[0].ClassName).Should(Equal("My test suite")) + Ω(output.TestCases[0].FailureMessage.Type).Should(Equal(specStateCase.message)) + Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring("I failed")) + Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring(spec.Failure.ComponentCodeLocation.String())) + Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring(spec.Failure.Location.String())) + Ω(output.TestCases[0].Skipped).Should(BeNil()) + }) + }) + } + + for _, specStateCase := 
range []types.SpecState{types.SpecStatePending, types.SpecStateSkipped} { + specStateCase := specStateCase + Describe("a skipped test", func() { + var spec *types.SpecSummary + BeforeEach(func() { + spec = &types.SpecSummary{ + ComponentTexts: []string{"[Top Level]", "A", "B", "C"}, + State: specStateCase, + RunTime: 5 * time.Second, + } + reporter.SpecWillRun(spec) + reporter.SpecDidComplete(spec) + + reporter.SpecSuiteDidEnd(&types.SuiteSummary{ + NumberOfSpecsThatWillBeRun: 1, + NumberOfFailedSpecs: 0, + RunTime: 10 * time.Second, + }) + }) + + It("should record test as failing", func() { + output := readOutputFile() + Ω(output.Tests).Should(Equal(1)) + Ω(output.Failures).Should(Equal(0)) + Ω(output.Time).Should(Equal(10.0)) + Ω(output.TestCases[0].Name).Should(Equal("A B C")) + Ω(output.TestCases[0].Skipped).ShouldNot(BeNil()) + }) + }) + } +}) diff --git a/vendor/github.com/onsi/ginkgo/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/reporters/reporter.go new file mode 100644 index 00000000000..348b9dfce1f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/reporter.go @@ -0,0 +1,15 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +type Reporter interface { + SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) + BeforeSuiteDidRun(setupSummary *types.SetupSummary) + SpecWillRun(specSummary *types.SpecSummary) + SpecDidComplete(specSummary *types.SpecSummary) + AfterSuiteDidRun(setupSummary *types.SetupSummary) + SpecSuiteDidEnd(summary *types.SuiteSummary) +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/reporters_suite_test.go b/vendor/github.com/onsi/ginkgo/reporters/reporters_suite_test.go new file mode 100644 index 00000000000..cec5a4dbfb0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/reporters_suite_test.go @@ -0,0 +1,13 @@ +package reporters_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestReporters(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Reporters Suite") +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go new file mode 100644 index 00000000000..45b8f88690e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go @@ -0,0 +1,64 @@ +package stenographer + +import ( + "fmt" + "strings" +) + +func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string { + var out string + + if len(args) > 0 { + out = fmt.Sprintf(format, args...) 
+ } else { + out = format + } + + if s.color { + return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle) + } else { + return out + } +} + +func (s *consoleStenographer) printBanner(text string, bannerCharacter string) { + fmt.Fprintln(s.w, text) + fmt.Fprintln(s.w, strings.Repeat(bannerCharacter, len(text))) +} + +func (s *consoleStenographer) printNewLine() { + fmt.Fprintln(s.w, "") +} + +func (s *consoleStenographer) printDelimiter() { + fmt.Fprintln(s.w, s.colorize(grayColor, "%s", strings.Repeat("-", 30))) +} + +func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) { + fmt.Fprint(s.w, s.indent(indentation, format, args...)) +} + +func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) { + fmt.Fprintln(s.w, s.indent(indentation, format, args...)) +} + +func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string { + var text string + + if len(args) > 0 { + text = fmt.Sprintf(format, args...) + } else { + text = format + } + + stringArray := strings.Split(text, "\n") + padding := "" + if indentation >= 0 { + padding = strings.Repeat(" ", indentation) + } + for i, s := range stringArray { + stringArray[i] = fmt.Sprintf("%s%s", padding, s) + } + + return strings.Join(stringArray, "\n") +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go new file mode 100644 index 00000000000..98854e7d9aa --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go @@ -0,0 +1,142 @@ +package stenographer + +import ( + "sync" + + "github.com/onsi/ginkgo/types" +) + +func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall { + return FakeStenographerCall{ + Method: method, + Args: args, + } +} + +type FakeStenographer struct { + calls []FakeStenographerCall + lock *sync.Mutex +} + +type FakeStenographerCall struct { + Method string + Args []interface{} +} + +func NewFakeStenographer() *FakeStenographer { + stenographer := &FakeStenographer{ + lock: &sync.Mutex{}, + } + stenographer.Reset() + return stenographer +} + +func (stenographer *FakeStenographer) Calls() []FakeStenographerCall { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + return stenographer.calls +} + +func (stenographer *FakeStenographer) Reset() { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + stenographer.calls = make([]FakeStenographerCall, 0) +} + +func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + results := make([]FakeStenographerCall, 0) + for _, call := range stenographer.calls { + if call.Method == method { + results = append(results, call) + } + } + + return results +} + +func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...)) +} + +func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) { + stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct) +} + +func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) { + stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct) +} + 
+func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) { + stenographer.registerCall("AnnounceParallelRun", node, nodes, succinct) +} + +func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) { + stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct) +} + +func (stenographer *FakeStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) { + stenographer.registerCall("AnnounceTotalNumberOfSpecs", total, succinct) +} + +func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { + stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct) +} + +func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) { + stenographer.registerCall("AnnounceSpecWillRun", spec) +} + +func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace) +} +func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) { + stenographer.registerCall("AnnounceCapturedOutput", output) +} + +func (stenographer *FakeStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) { + stenographer.registerCall("AnnounceSuccesfulSpec", spec) +} + +func (stenographer *FakeStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) { + stenographer.registerCall("AnnounceSuccesfulSlowSpec", spec, succinct) +} + +func (stenographer *FakeStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) { + stenographer.registerCall("AnnounceSuccesfulMeasurement", spec, succinct) +} + +func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) { + stenographer.registerCall("AnnouncePendingSpec", spec, noisy) +} + +func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) { + stenographer.registerCall("SummarizeFailures", summaries) +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go new file mode 100644 index 00000000000..fefd3e182f5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go @@ -0,0 +1,573 @@ +/* +The stenographer is used by Ginkgo's reporters to generate output. + +Move along, nothing to see here. 
+*/ + +package stenographer + +import ( + "fmt" + "io" + "runtime" + "strings" + + "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable" + "github.com/onsi/ginkgo/types" +) + +const defaultStyle = "\x1b[0m" +const boldStyle = "\x1b[1m" +const redColor = "\x1b[91m" +const greenColor = "\x1b[32m" +const yellowColor = "\x1b[33m" +const cyanColor = "\x1b[36m" +const grayColor = "\x1b[90m" +const lightGrayColor = "\x1b[37m" + +type cursorStateType int + +const ( + cursorStateTop cursorStateType = iota + cursorStateStreaming + cursorStateMidBlock + cursorStateEndBlock +) + +type Stenographer interface { + AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) + AnnounceAggregatedParallelRun(nodes int, succinct bool) + AnnounceParallelRun(node int, nodes int, succinct bool) + AnnounceTotalNumberOfSpecs(total int, succinct bool) + AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) + AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) + + AnnounceSpecWillRun(spec *types.SpecSummary) + AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) + AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) + + AnnounceCapturedOutput(output string) + + AnnounceSuccesfulSpec(spec *types.SpecSummary) + AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) + AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) + + AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) + AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) + + AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) + AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) + AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) + + SummarizeFailures(summaries []*types.SpecSummary) +} + +func New(color bool, enableFlakes bool) Stenographer { + denoter := "•" + if runtime.GOOS == "windows" { + denoter = "+" + } + return &consoleStenographer{ + color: color, + denoter: denoter, + cursorState: cursorStateTop, + enableFlakes: enableFlakes, + w: colorable.NewColorableStdout(), + } +} + +type consoleStenographer struct { + color bool + denoter string + cursorState cursorStateType + enableFlakes bool + w io.Writer +} + +var alternatingColors = []string{defaultStyle, grayColor} + +func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) { + if succinct { + s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description)) + return + } + s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=") + s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed)) + if randomizingAll { + s.print(0, " - Will randomize all specs") + } + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) { + if succinct { + s.print(0, "- node #%d ", node) + return + } + s.println(0, + "Parallel test node %s/%s.", + s.colorize(boldStyle, "%d", node), + s.colorize(boldStyle, "%d", nodes), + ) + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) { + if succinct { + s.print(0, "- %d nodes ", nodes) + return + } + s.println(0, + "Running in parallel across %s nodes", + s.colorize(boldStyle, "%d", nodes), + ) + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) { + if succinct { 
+ s.print(0, "- %d/%d specs ", specsToRun, total) + s.stream() + return + } + s.println(0, + "Will run %s of %s specs", + s.colorize(boldStyle, "%d", specsToRun), + s.colorize(boldStyle, "%d", total), + ) + + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) { + if succinct { + s.print(0, "- %d specs ", total) + s.stream() + return + } + s.println(0, + "Will run %s specs", + s.colorize(boldStyle, "%d", total), + ) + + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { + if succinct && summary.SuiteSucceeded { + s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime) + return + } + s.printNewLine() + color := greenColor + if !summary.SuiteSucceeded { + color = redColor + } + s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds())) + + status := "" + if summary.SuiteSucceeded { + status = s.colorize(boldStyle+greenColor, "SUCCESS!") + } else { + status = s.colorize(boldStyle+redColor, "FAIL!") + } + + flakes := "" + if s.enableFlakes { + flakes = " | " + s.colorize(yellowColor+boldStyle, "%d Flaked", summary.NumberOfFlakedSpecs) + } + + s.print(0, + "%s -- %s | %s | %s | %s ", + status, + s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs), + s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs)+flakes, + s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs), + s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs), + ) +} + +func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) { + s.startBlock() + for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] { + s.print(0, s.colorize(alternatingColors[i%2], text)+" ") + } + + indentation := 0 + if len(spec.ComponentTexts) > 2 { + indentation = 1 + s.printNewLine() + } + index := len(spec.ComponentTexts) - 1 + s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index])) + s.printNewLine() + s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String())) + s.printNewLine() + s.midBlock() +} + +func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace) +} + +func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace) +} + +func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) { + s.startBlock() + var message string + switch summary.State { + case types.SpecStateFailed: + message = "Failure" + case types.SpecStatePanicked: + message = "Panic" + case types.SpecStateTimedOut: + message = "Timeout" + } + + s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds())) + + indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true) + + s.printNewLine() + s.printFailure(indentation, summary.State, summary.Failure, fullTrace) + + s.endBlock() +} + +func (s *consoleStenographer) AnnounceCapturedOutput(output string) { + if output == "" { + return + } + + s.startBlock() + s.println(0, output) 
+ s.midBlock() +} + +func (s *consoleStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) { + s.print(0, s.colorize(greenColor, s.denoter)) + s.stream() +} + +func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) { + s.printBlockWithMessage( + s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()), + "", + spec, + succinct, + ) +} + +func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) { + s.printBlockWithMessage( + s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter), + s.measurementReport(spec, succinct), + spec, + succinct, + ) +} + +func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) { + if noisy { + s.printBlockWithMessage( + s.colorize(yellowColor, "P [PENDING]"), + "", + spec, + false, + ) + } else { + s.print(0, s.colorize(yellowColor, "P")) + s.stream() + } +} + +func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) { + // Skips at runtime will have a non-empty spec.Failure. All others should be succinct. + if succinct || spec.Failure == (types.SpecFailure{}) { + s.print(0, s.colorize(cyanColor, "S")) + s.stream() + } else { + s.startBlock() + s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds())) + + indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct) + + s.printNewLine() + s.printSkip(indentation, spec.Failure) + s.endBlock() + } +} + +func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace) +} + +func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace) +} + +func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace) +} + +func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) { + failingSpecs := []*types.SpecSummary{} + + for _, summary := range summaries { + if summary.HasFailureState() { + failingSpecs = append(failingSpecs, summary) + } + } + + if len(failingSpecs) == 0 { + return + } + + s.printNewLine() + s.printNewLine() + plural := "s" + if len(failingSpecs) == 1 { + plural = "" + } + s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural)) + for _, summary := range failingSpecs { + s.printNewLine() + if summary.HasFailureState() { + if summary.TimedOut() { + s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] ")) + } else if summary.Panicked() { + s.print(0, s.colorize(redColor+boldStyle, "[Panic!] 
")) + } else if summary.Failed() { + s.print(0, s.colorize(redColor+boldStyle, "[Fail] ")) + } + s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true) + s.printNewLine() + s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String())) + } + } +} + +func (s *consoleStenographer) startBlock() { + if s.cursorState == cursorStateStreaming { + s.printNewLine() + s.printDelimiter() + } else if s.cursorState == cursorStateMidBlock { + s.printNewLine() + } +} + +func (s *consoleStenographer) midBlock() { + s.cursorState = cursorStateMidBlock +} + +func (s *consoleStenographer) endBlock() { + s.printDelimiter() + s.cursorState = cursorStateEndBlock +} + +func (s *consoleStenographer) stream() { + s.cursorState = cursorStateStreaming +} + +func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) { + s.startBlock() + s.println(0, header) + + indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct) + + if message != "" { + s.printNewLine() + s.println(indentation, message) + } + + s.endBlock() +} + +func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.startBlock() + s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds())) + + indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct) + + s.printNewLine() + s.printFailure(indentation, spec.State, spec.Failure, fullTrace) + s.endBlock() +} + +func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string { + switch failedComponentType { + case types.SpecComponentTypeBeforeSuite: + return " in Suite Setup (BeforeSuite)" + case types.SpecComponentTypeAfterSuite: + return " in Suite Teardown (AfterSuite)" + case types.SpecComponentTypeBeforeEach: + return " in Spec Setup (BeforeEach)" + case types.SpecComponentTypeJustBeforeEach: + return " in Spec Setup (JustBeforeEach)" + case types.SpecComponentTypeAfterEach: + return " in Spec Teardown (AfterEach)" + } + + return "" +} + +func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) { + s.println(indentation, s.colorize(cyanColor, spec.Message)) + s.printNewLine() + s.println(indentation, spec.Location.String()) +} + +func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) { + if state == types.SpecStatePanicked { + s.println(indentation, s.colorize(redColor+boldStyle, failure.Message)) + s.println(indentation, s.colorize(redColor, failure.ForwardedPanic)) + s.println(indentation, failure.Location.String()) + s.printNewLine() + s.println(indentation, s.colorize(redColor, "Full Stack Trace")) + s.println(indentation, failure.Location.FullStackTrace) + } else { + s.println(indentation, s.colorize(redColor, failure.Message)) + s.printNewLine() + s.println(indentation, failure.Location.String()) + if fullTrace { + s.printNewLine() + s.println(indentation, s.colorize(redColor, "Full Stack Trace")) + s.println(indentation, failure.Location.FullStackTrace) + } + } +} + +func (s *consoleStenographer) printSpecContext(componentTexts []string, 
componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int { + startIndex := 1 + indentation := 0 + + if len(componentTexts) == 1 { + startIndex = 0 + } + + for i := startIndex; i < len(componentTexts); i++ { + if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex { + color := redColor + if state == types.SpecStateSkipped { + color = cyanColor + } + blockType := "" + switch failedComponentType { + case types.SpecComponentTypeBeforeSuite: + blockType = "BeforeSuite" + case types.SpecComponentTypeAfterSuite: + blockType = "AfterSuite" + case types.SpecComponentTypeBeforeEach: + blockType = "BeforeEach" + case types.SpecComponentTypeJustBeforeEach: + blockType = "JustBeforeEach" + case types.SpecComponentTypeAfterEach: + blockType = "AfterEach" + case types.SpecComponentTypeIt: + blockType = "It" + case types.SpecComponentTypeMeasure: + blockType = "Measurement" + } + if succinct { + s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i])) + } else { + s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType)) + s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i])) + } + } else { + if succinct { + s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i])) + } else { + s.println(indentation, componentTexts[i]) + s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i])) + } + } + indentation++ + } + + return indentation +} + +func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int { + indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct) + + if succinct { + if len(componentTexts) > 0 { + s.printNewLine() + s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1])) + } + s.printNewLine() + indentation = 1 + } else { + indentation-- + } + + return indentation +} + +func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string { + orderedKeys := make([]string, len(measurements)) + for key, measurement := range measurements { + orderedKeys[measurement.Order] = key + } + return orderedKeys +} + +func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string { + if len(spec.Measurements) == 0 { + return "Found no measurements" + } + + message := []string{} + orderedKeys := s.orderedMeasurementKeys(spec.Measurements) + + if succinct { + message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples))) + for _, key := range orderedKeys { + measurement := spec.Measurements[key] + message = append(message, fmt.Sprintf(" %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s", + s.colorize(boldStyle, "%s", measurement.Name), + measurement.SmallestLabel, + s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest), + measurement.Units, + measurement.AverageLabel, + s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average), + measurement.Units, + s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation), + measurement.Units, + measurement.LargestLabel, + s.colorize(redColor, measurement.PrecisionFmt(), 
measurement.Largest),
+				measurement.Units,
+			))
+		}
+	} else {
+		message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
+		for _, key := range orderedKeys {
+			measurement := spec.Measurements[key]
+			info := ""
+			if measurement.Info != nil {
+				message = append(message, fmt.Sprintf("%v", measurement.Info))
+			}
+
+			message = append(message, fmt.Sprintf("%s:\n%s %s: %s%s\n %s: %s%s\n %s: %s%s ± %s%s",
+				s.colorize(boldStyle, "%s", measurement.Name),
+				info,
+				measurement.SmallestLabel,
+				s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest),
+				measurement.Units,
+				measurement.LargestLabel,
+				s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest),
+				measurement.Units,
+				measurement.AverageLabel,
+				s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average),
+				measurement.Units,
+				s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation),
+				measurement.Units,
+			))
+		}
+	}
+
+	return strings.Join(message, "\n")
+}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE
new file mode 100644
index 00000000000..91b5cef30eb
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md
new file mode 100644
index 00000000000..e84226a7358
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md
@@ -0,0 +1,43 @@
+# go-colorable
+
+Colorable writer for windows.
+
+For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.)
+This package is possible to handle escape sequence for ansi color on windows.
+
+## Too Bad!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
+
+
+## So Good!
+ +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) + +## Usage + +```go +logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) +logrus.SetOutput(colorable.NewColorableStdout()) + +logrus.Info("succeeded") +logrus.Warn("not correct") +logrus.Error("something error") +logrus.Fatal("panic") +``` + +You can compile above code on non-windows OSs. + +## Installation + +``` +$ go get github.com/mattn/go-colorable +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go new file mode 100644 index 00000000000..52d6653b34b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go @@ -0,0 +1,24 @@ +// +build !windows + +package colorable + +import ( + "io" + "os" +) + +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +func NewColorableStdout() io.Writer { + return os.Stdout +} + +func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go new file mode 100644 index 00000000000..1088009230b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go @@ -0,0 +1,783 @@ +package colorable + +import ( + "bytes" + "fmt" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") +) + +type Writer struct { + out io.Writer + handle syscall.Handle + lastbuf bytes.Buffer + oldattr word +} + +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes} + } else { 
+ return file + } +} + +func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 
0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +func (w *Writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + er := bytes.NewBuffer(data) +loop: + for { + r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + if r1 == 0 { + break loop + } + + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + var m rune + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + break + } + buf.Write([]byte(string(c))) + } + + var csbi consoleScreenBufferInfo + switch m { + case 'A': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n, err = strconv.Atoi(buf.String()); err == nil { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + } + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + 
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H': + token := strings.Split(buf.String(), ";") + if len(token) != 2 { + continue + } + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2) + csbi.cursorPosition.x = short(n1) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i += 1 { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n == 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 22 == n || n == 25 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 30 <= n && n <= 37: + attr = (attr & backgroundMask) + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr 
|= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr = (attr & foregroundMask) + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) + } + } + } + } + return len(data) - w.lastbuf.Len(), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + consoleColor{0x000000, false, false, false, false}, + consoleColor{0x000080, false, false, true, false}, + consoleColor{0x008000, false, true, false, false}, + consoleColor{0x008080, false, true, true, false}, + consoleColor{0x800000, true, false, false, false}, + consoleColor{0x800080, true, false, true, false}, + consoleColor{0x808000, true, true, false, false}, + consoleColor{0xc0c0c0, true, true, true, false}, + consoleColor{0x808080, false, false, false, true}, + consoleColor{0x0000ff, false, false, true, true}, + consoleColor{0x00ff00, false, true, false, true}, + consoleColor{0x00ffff, false, true, true, true}, + consoleColor{0xff0000, true, false, false, true}, + consoleColor{0xff00ff, true, false, true, true}, + consoleColor{0xffff00, true, true, false, true}, + consoleColor{0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := 
float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go new file mode 100644 index 00000000000..fb976dbd8b7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go @@ -0,0 +1,57 @@ +package colorable + +import ( + "bytes" + "fmt" + "io" +) + +type NonColorable struct { + out io.Writer + lastbuf bytes.Buffer +} + +func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewBuffer(data) +loop: + for { + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + return len(data) - w.lastbuf.Len(), nil +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE new file mode 100644 index 00000000000..65dc692b6b1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The 
above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md
new file mode 100644
index 00000000000..74845de4a24
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md
@@ -0,0 +1,37 @@
+# go-isatty
+
+isatty for golang
+
+## Usage
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/mattn/go-isatty"
+	"os"
+)
+
+func main() {
+	if isatty.IsTerminal(os.Stdout.Fd()) {
+		fmt.Println("Is Terminal")
+	} else {
+		fmt.Println("Is Not Terminal")
+	}
+}
+```
+
+## Installation
+
+```
+$ go get github.com/mattn/go-isatty
+```
+
+# License
+
+MIT
+
+# Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go
new file mode 100644
index 00000000000..17d4f90ebcc
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go
@@ -0,0 +1,2 @@
+// Package isatty implements interface to isatty
+package isatty
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go
new file mode 100644
index 00000000000..83c588773cf
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go
@@ -0,0 +1,9 @@
+// +build appengine
+
+package isatty
+
+// IsTerminal returns true if the file descriptor is terminal which
+// is always false on on appengine classic which is a sandboxed PaaS.
+func IsTerminal(fd uintptr) bool {
+	return false
+}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go
new file mode 100644
index 00000000000..98ffe86a4ba
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go
@@ -0,0 +1,18 @@
+// +build darwin freebsd openbsd netbsd
+// +build !appengine
+
+package isatty
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+// IsTerminal return true if the file descriptor is terminal.
+func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go new file mode 100644 index 00000000000..9d24bac1db3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go @@ -0,0 +1,18 @@ +// +build linux +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go new file mode 100644 index 00000000000..1f0c6bf53dc --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go @@ -0,0 +1,16 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go new file mode 100644 index 00000000000..83c398b16db --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go @@ -0,0 +1,19 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") +var procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + +// IsTerminal return true if the file descriptor is terminal. 
+func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go new file mode 100644 index 00000000000..657dfe726e2 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go @@ -0,0 +1,92 @@ +/* + +TeamCity Reporter for Ginkgo + +Makes use of TeamCity's support for Service Messages +http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests +*/ + +package reporters + +import ( + "fmt" + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" + "io" + "strings" +) + +const ( + messageId = "##teamcity" +) + +type TeamCityReporter struct { + writer io.Writer + testSuiteName string +} + +func NewTeamCityReporter(writer io.Writer) *TeamCityReporter { + return &TeamCityReporter{ + writer: writer, + } +} + +func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + reporter.testSuiteName = escape(summary.SuiteDescription) + fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']", messageId, reporter.testSuiteName) +} + +func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("BeforeSuite", setupSummary) +} + +func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("AfterSuite", setupSummary) +} + +func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + testName := escape(name) + fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName) + message := escape(setupSummary.Failure.ComponentCodeLocation.String()) + details := escape(setupSummary.Failure.Message) + fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details) + durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000 + fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds) + } +} + +func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) { + testName := escape(strings.Join(specSummary.ComponentTexts[1:], " ")) + fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName) +} + +func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) { + testName := escape(strings.Join(specSummary.ComponentTexts[1:], " ")) + + if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { + message := escape(specSummary.Failure.ComponentCodeLocation.String()) + details := escape(specSummary.Failure.Message) + fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details) + } + if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { + fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']", messageId, testName) + } + + durationInMilliseconds := specSummary.RunTime.Seconds() * 1000 + fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds) +} + +func (reporter 
*TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']", messageId, reporter.testSuiteName) +} + +func escape(output string) string { + output = strings.Replace(output, "|", "||", -1) + output = strings.Replace(output, "'", "|'", -1) + output = strings.Replace(output, "\n", "|n", -1) + output = strings.Replace(output, "\r", "|r", -1) + output = strings.Replace(output, "[", "|[", -1) + output = strings.Replace(output, "]", "|]", -1) + return output +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter_test.go b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter_test.go new file mode 100644 index 00000000000..de87732113c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter_test.go @@ -0,0 +1,213 @@ +package reporters_test + +import ( + "bytes" + "fmt" + . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" + . "github.com/onsi/gomega" + "time" +) + +var _ = Describe("TeamCity Reporter", func() { + var ( + buffer bytes.Buffer + reporter Reporter + ) + + BeforeEach(func() { + buffer.Truncate(0) + reporter = reporters.NewTeamCityReporter(&buffer) + reporter.SpecSuiteWillBegin(config.GinkgoConfigType{}, &types.SuiteSummary{ + SuiteDescription: "Foo's test suite", + NumberOfSpecsThatWillBeRun: 1, + }) + }) + + Describe("a passing test", func() { + BeforeEach(func() { + beforeSuite := &types.SetupSummary{ + State: types.SpecStatePassed, + } + reporter.BeforeSuiteDidRun(beforeSuite) + + afterSuite := &types.SetupSummary{ + State: types.SpecStatePassed, + } + reporter.AfterSuiteDidRun(afterSuite) + + spec := &types.SpecSummary{ + ComponentTexts: []string{"[Top Level]", "A", "B", "C"}, + State: types.SpecStatePassed, + RunTime: 5 * time.Second, + } + reporter.SpecWillRun(spec) + reporter.SpecDidComplete(spec) + + reporter.SpecSuiteDidEnd(&types.SuiteSummary{ + NumberOfSpecsThatWillBeRun: 1, + NumberOfFailedSpecs: 0, + RunTime: 10 * time.Second, + }) + }) + + It("should record the test as passing", func() { + actual := buffer.String() + expected := + "##teamcity[testSuiteStarted name='Foo|'s test suite']" + + "##teamcity[testStarted name='A B C']" + + "##teamcity[testFinished name='A B C' duration='5000']" + + "##teamcity[testSuiteFinished name='Foo|'s test suite']" + Ω(actual).Should(Equal(expected)) + }) + }) + + Describe("when the BeforeSuite fails", func() { + var beforeSuite *types.SetupSummary + + BeforeEach(func() { + beforeSuite = &types.SetupSummary{ + State: types.SpecStateFailed, + RunTime: 3 * time.Second, + Failure: types.SpecFailure{ + Message: "failed to setup\n", + ComponentCodeLocation: codelocation.New(0), + }, + } + reporter.BeforeSuiteDidRun(beforeSuite) + + reporter.SpecSuiteDidEnd(&types.SuiteSummary{ + NumberOfSpecsThatWillBeRun: 1, + NumberOfFailedSpecs: 1, + RunTime: 10 * time.Second, + }) + }) + + It("should record the test as having failed", func() { + actual := buffer.String() + expected := fmt.Sprintf( + "##teamcity[testSuiteStarted name='Foo|'s test suite']"+ + "##teamcity[testStarted name='BeforeSuite']"+ + "##teamcity[testFailed name='BeforeSuite' message='%s' details='failed to setup|n']"+ + "##teamcity[testFinished name='BeforeSuite' duration='3000']"+ + "##teamcity[testSuiteFinished name='Foo|'s test suite']", beforeSuite.Failure.ComponentCodeLocation.String(), + ) + Ω(actual).Should(Equal(expected)) + }) + }) + + 
Describe("when the AfterSuite fails", func() { + var afterSuite *types.SetupSummary + + BeforeEach(func() { + afterSuite = &types.SetupSummary{ + State: types.SpecStateFailed, + RunTime: 3 * time.Second, + Failure: types.SpecFailure{ + Message: "failed to setup\n", + ComponentCodeLocation: codelocation.New(0), + }, + } + reporter.AfterSuiteDidRun(afterSuite) + + reporter.SpecSuiteDidEnd(&types.SuiteSummary{ + NumberOfSpecsThatWillBeRun: 1, + NumberOfFailedSpecs: 1, + RunTime: 10 * time.Second, + }) + }) + + It("should record the test as having failed", func() { + actual := buffer.String() + expected := fmt.Sprintf( + "##teamcity[testSuiteStarted name='Foo|'s test suite']"+ + "##teamcity[testStarted name='AfterSuite']"+ + "##teamcity[testFailed name='AfterSuite' message='%s' details='failed to setup|n']"+ + "##teamcity[testFinished name='AfterSuite' duration='3000']"+ + "##teamcity[testSuiteFinished name='Foo|'s test suite']", afterSuite.Failure.ComponentCodeLocation.String(), + ) + Ω(actual).Should(Equal(expected)) + }) + }) + specStateCases := []struct { + state types.SpecState + message string + }{ + {types.SpecStateFailed, "Failure"}, + {types.SpecStateTimedOut, "Timeout"}, + {types.SpecStatePanicked, "Panic"}, + } + + for _, specStateCase := range specStateCases { + specStateCase := specStateCase + Describe("a failing test", func() { + var spec *types.SpecSummary + BeforeEach(func() { + spec = &types.SpecSummary{ + ComponentTexts: []string{"[Top Level]", "A", "B", "C"}, + State: specStateCase.state, + RunTime: 5 * time.Second, + Failure: types.SpecFailure{ + ComponentCodeLocation: codelocation.New(0), + Message: "I failed", + }, + } + reporter.SpecWillRun(spec) + reporter.SpecDidComplete(spec) + + reporter.SpecSuiteDidEnd(&types.SuiteSummary{ + NumberOfSpecsThatWillBeRun: 1, + NumberOfFailedSpecs: 1, + RunTime: 10 * time.Second, + }) + }) + + It("should record test as failing", func() { + actual := buffer.String() + expected := + fmt.Sprintf("##teamcity[testSuiteStarted name='Foo|'s test suite']"+ + "##teamcity[testStarted name='A B C']"+ + "##teamcity[testFailed name='A B C' message='%s' details='I failed']"+ + "##teamcity[testFinished name='A B C' duration='5000']"+ + "##teamcity[testSuiteFinished name='Foo|'s test suite']", spec.Failure.ComponentCodeLocation.String()) + Ω(actual).Should(Equal(expected)) + }) + }) + } + + for _, specStateCase := range []types.SpecState{types.SpecStatePending, types.SpecStateSkipped} { + specStateCase := specStateCase + Describe("a skipped test", func() { + var spec *types.SpecSummary + BeforeEach(func() { + spec = &types.SpecSummary{ + ComponentTexts: []string{"[Top Level]", "A", "B", "C"}, + State: specStateCase, + RunTime: 5 * time.Second, + } + reporter.SpecWillRun(spec) + reporter.SpecDidComplete(spec) + + reporter.SpecSuiteDidEnd(&types.SuiteSummary{ + NumberOfSpecsThatWillBeRun: 1, + NumberOfFailedSpecs: 0, + RunTime: 10 * time.Second, + }) + }) + + It("should record test as ignored", func() { + actual := buffer.String() + expected := + "##teamcity[testSuiteStarted name='Foo|'s test suite']" + + "##teamcity[testStarted name='A B C']" + + "##teamcity[testIgnored name='A B C']" + + "##teamcity[testFinished name='A B C' duration='5000']" + + "##teamcity[testSuiteFinished name='Foo|'s test suite']" + Ω(actual).Should(Equal(expected)) + }) + }) + } +}) diff --git a/vendor/github.com/onsi/ginkgo/types/code_location.go b/vendor/github.com/onsi/ginkgo/types/code_location.go new file mode 100644 index 00000000000..935a89e136a --- /dev/null +++ 
b/vendor/github.com/onsi/ginkgo/types/code_location.go @@ -0,0 +1,15 @@ +package types + +import ( + "fmt" +) + +type CodeLocation struct { + FileName string + LineNumber int + FullStackTrace string +} + +func (codeLocation CodeLocation) String() string { + return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber) +} diff --git a/vendor/github.com/onsi/ginkgo/types/synchronization.go b/vendor/github.com/onsi/ginkgo/types/synchronization.go new file mode 100644 index 00000000000..fdd6ed5bdf8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/types/synchronization.go @@ -0,0 +1,30 @@ +package types + +import ( + "encoding/json" +) + +type RemoteBeforeSuiteState int + +const ( + RemoteBeforeSuiteStateInvalid RemoteBeforeSuiteState = iota + + RemoteBeforeSuiteStatePending + RemoteBeforeSuiteStatePassed + RemoteBeforeSuiteStateFailed + RemoteBeforeSuiteStateDisappeared +) + +type RemoteBeforeSuiteData struct { + Data []byte + State RemoteBeforeSuiteState +} + +func (r RemoteBeforeSuiteData) ToJSON() []byte { + data, _ := json.Marshal(r) + return data +} + +type RemoteAfterSuiteData struct { + CanRun bool +} diff --git a/vendor/github.com/onsi/ginkgo/types/types.go b/vendor/github.com/onsi/ginkgo/types/types.go new file mode 100644 index 00000000000..baf1bd1c474 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/types/types.go @@ -0,0 +1,173 @@ +package types + +import ( + "strconv" + "time" +) + +const GINKGO_FOCUS_EXIT_CODE = 197 + +/* +SuiteSummary represents the a summary of the test suite and is passed to both +Reporter.SpecSuiteWillBegin +Reporter.SpecSuiteDidEnd + +this is unfortunate as these two methods should receive different objects. When running in parallel +each node does not deterministically know how many specs it will end up running. + +Unfortunately making such a change would break backward compatibility. + +Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unkown fields +with -1. +*/ +type SuiteSummary struct { + SuiteDescription string + SuiteSucceeded bool + SuiteID string + + NumberOfSpecsBeforeParallelization int + NumberOfTotalSpecs int + NumberOfSpecsThatWillBeRun int + NumberOfPendingSpecs int + NumberOfSkippedSpecs int + NumberOfPassedSpecs int + NumberOfFailedSpecs int + // Flaked specs are those that failed initially, but then passed on a + // subsequent try. 
+ NumberOfFlakedSpecs int + RunTime time.Duration +} + +type SpecSummary struct { + ComponentTexts []string + ComponentCodeLocations []CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + IsMeasurement bool + NumberOfSamples int + Measurements map[string]*SpecMeasurement + + CapturedOutput string + SuiteID string +} + +func (s SpecSummary) HasFailureState() bool { + return s.State.IsFailure() +} + +func (s SpecSummary) TimedOut() bool { + return s.State == SpecStateTimedOut +} + +func (s SpecSummary) Panicked() bool { + return s.State == SpecStatePanicked +} + +func (s SpecSummary) Failed() bool { + return s.State == SpecStateFailed +} + +func (s SpecSummary) Passed() bool { + return s.State == SpecStatePassed +} + +func (s SpecSummary) Skipped() bool { + return s.State == SpecStateSkipped +} + +func (s SpecSummary) Pending() bool { + return s.State == SpecStatePending +} + +type SetupSummary struct { + ComponentType SpecComponentType + CodeLocation CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + + CapturedOutput string + SuiteID string +} + +type SpecFailure struct { + Message string + Location CodeLocation + ForwardedPanic string + + ComponentIndex int + ComponentType SpecComponentType + ComponentCodeLocation CodeLocation +} + +type SpecMeasurement struct { + Name string + Info interface{} + Order int + + Results []float64 + + Smallest float64 + Largest float64 + Average float64 + StdDeviation float64 + + SmallestLabel string + LargestLabel string + AverageLabel string + Units string + Precision int +} + +func (s SpecMeasurement) PrecisionFmt() string { + if s.Precision == 0 { + return "%f" + } + + str := strconv.Itoa(s.Precision) + + return "%." + str + "f" +} + +type SpecState uint + +const ( + SpecStateInvalid SpecState = iota + + SpecStatePending + SpecStateSkipped + SpecStatePassed + SpecStateFailed + SpecStatePanicked + SpecStateTimedOut +) + +func (state SpecState) IsFailure() bool { + return state == SpecStateTimedOut || state == SpecStatePanicked || state == SpecStateFailed +} + +type SpecComponentType uint + +const ( + SpecComponentTypeInvalid SpecComponentType = iota + + SpecComponentTypeContainer + SpecComponentTypeBeforeSuite + SpecComponentTypeAfterSuite + SpecComponentTypeBeforeEach + SpecComponentTypeJustBeforeEach + SpecComponentTypeAfterEach + SpecComponentTypeIt + SpecComponentTypeMeasure +) + +type FlagType uint + +const ( + FlagTypeNone FlagType = iota + FlagTypeFocused + FlagTypePending +) diff --git a/vendor/github.com/onsi/ginkgo/types/types_suite_test.go b/vendor/github.com/onsi/ginkgo/types/types_suite_test.go new file mode 100644 index 00000000000..b026169c12b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/types/types_suite_test.go @@ -0,0 +1,13 @@ +package types_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestTypes(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Types Suite") +} diff --git a/vendor/github.com/onsi/ginkgo/types/types_test.go b/vendor/github.com/onsi/ginkgo/types/types_test.go new file mode 100644 index 00000000000..a0e161c8880 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/types/types_test.go @@ -0,0 +1,99 @@ +package types_test + +import ( + . "github.com/onsi/ginkgo/types" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var specStates = []SpecState{ + SpecStatePassed, + SpecStateTimedOut, + SpecStatePanicked, + SpecStateFailed, + SpecStatePending, + SpecStateSkipped, +} + +func verifySpecSummary(caller func(SpecSummary) bool, trueStates ...SpecState) { + summary := SpecSummary{} + trueStateLookup := map[SpecState]bool{} + for _, state := range trueStates { + trueStateLookup[state] = true + summary.State = state + Ω(caller(summary)).Should(BeTrue()) + } + + for _, state := range specStates { + if trueStateLookup[state] { + continue + } + summary.State = state + Ω(caller(summary)).Should(BeFalse()) + } +} + +var _ = Describe("Types", func() { + Describe("IsFailureState", func() { + It("knows when it is in a failure-like state", func() { + verifySpecSummary(func(summary SpecSummary) bool { + return summary.State.IsFailure() + }, SpecStateTimedOut, SpecStatePanicked, SpecStateFailed) + }) + }) + + Describe("SpecSummary", func() { + It("knows when it is in a failure-like state", func() { + verifySpecSummary(func(summary SpecSummary) bool { + return summary.HasFailureState() + }, SpecStateTimedOut, SpecStatePanicked, SpecStateFailed) + }) + + It("knows when it passed", func() { + verifySpecSummary(func(summary SpecSummary) bool { + return summary.Passed() + }, SpecStatePassed) + }) + + It("knows when it has failed", func() { + verifySpecSummary(func(summary SpecSummary) bool { + return summary.Failed() + }, SpecStateFailed) + }) + + It("knows when it has panicked", func() { + verifySpecSummary(func(summary SpecSummary) bool { + return summary.Panicked() + }, SpecStatePanicked) + }) + + It("knows when it has timed out", func() { + verifySpecSummary(func(summary SpecSummary) bool { + return summary.TimedOut() + }, SpecStateTimedOut) + }) + + It("knows when it is pending", func() { + verifySpecSummary(func(summary SpecSummary) bool { + return summary.Pending() + }, SpecStatePending) + }) + + It("knows when it is skipped", func() { + verifySpecSummary(func(summary SpecSummary) bool { + return summary.Skipped() + }, SpecStateSkipped) + }) + }) + + Describe("SpecMeasurement", func() { + It("knows how to format values when the precision is 0", func() { + Ω(SpecMeasurement{}.PrecisionFmt()).Should(Equal("%f")) + }) + + It("knows how to format the values when the precision is 3", func() { + Ω(SpecMeasurement{Precision: 3}.PrecisionFmt()).Should(Equal("%.3f")) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/.gitignore b/vendor/github.com/onsi/gomega/.gitignore new file mode 100644 index 00000000000..720c13cba84 --- /dev/null +++ b/vendor/github.com/onsi/gomega/.gitignore @@ -0,0 +1,5 @@ +.DS_Store +*.test +. +.idea +gomega.iml diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml new file mode 100644 index 00000000000..8ed1e44a77b --- /dev/null +++ b/vendor/github.com/onsi/gomega/.travis.yml @@ -0,0 +1,12 @@ +language: go +go: + - 1.5 + - 1.6.2 + - stable + +install: + - go get -v ./... + - go get github.com/onsi/ginkgo + - go install github.com/onsi/ginkgo/ginkgo + +script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --failOnPending --randomizeSuites --race diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md new file mode 100644 index 00000000000..0c5ede5d828 --- /dev/null +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -0,0 +1,70 @@ +## HEAD + +Improvements: + +- Added `BeSent` which attempts to send a value down a channel and fails if the attempt blocks. 
Can be paired with `Eventually` to safely send a value down a channel with a timeout. +- `Ω`, `Expect`, `Eventually`, and `Consistently` now immediately `panic` if there is no registered fail handler. This is always a mistake that can hide failing tests. +- `Receive()` no longer errors when passed a closed channel, it's perfectly fine to attempt to read from a closed channel so Ω(c).Should(Receive()) always fails and Ω(c).ShoudlNot(Receive()) always passes with a closed channel. +- Added `HavePrefix` and `HaveSuffix` matchers. +- `ghttp` can now handle concurrent requests. +- Added `Succeed` which allows one to write `Ω(MyFunction()).Should(Succeed())`. +- Improved `ghttp`'s behavior around failing assertions and panics: + - If a registered handler makes a failing assertion `ghttp` will return `500`. + - If a registered handler panics, `ghttp` will return `500` *and* fail the test. This is new behavior that may cause existing code to break. This code is almost certainly incorrect and creating a false positive. +- `ghttp` servers can take an `io.Writer`. `ghttp` will write a line to the writer when each request arrives. +- Added `WithTransform` matcher to allow munging input data before feeding into the relevant matcher +- Added boolean `And`, `Or`, and `Not` matchers to allow creating composite matchers + +Bug Fixes: +- gexec: `session.Wait` now uses `EventuallyWithOffset` to get the right line number in the failure. +- `ContainElement` no longer bails if a passed-in matcher errors. + +## 1.0 (8/2/2014) + +No changes. Dropping "beta" from the version number. + +## 1.0.0-beta (7/8/2014) +Breaking Changes: + +- Changed OmegaMatcher interface. Instead of having `Match` return failure messages, two new methods `FailureMessage` and `NegatedFailureMessage` are called instead. +- Moved and renamed OmegaFailHandler to types.GomegaFailHandler and OmegaMatcher to types.GomegaMatcher. Any references to OmegaMatcher in any custom matchers will need to be changed to point to types.GomegaMatcher + +New Test-Support Features: + +- `ghttp`: supports testing http clients + - Provides a flexible fake http server + - Provides a collection of chainable http handlers that perform assertions. +- `gbytes`: supports making ordered assertions against streams of data + - Provides a `gbytes.Buffer` + - Provides a `Say` matcher to perform ordered assertions against output data +- `gexec`: supports testing external processes + - Provides support for building Go binaries + - Wraps and starts `exec.Cmd` commands + - Makes it easy to assert against stdout and stderr + - Makes it easy to send signals and wait for processes to exit + - Provides an `Exit` matcher to assert against exit code. + +DSL Changes: + +- `Eventually` and `Consistently` can accept `time.Duration` interval and polling inputs. +- The default timeouts for `Eventually` and `Consistently` are now configurable. + +New Matchers: + +- `ConsistOf`: order-independent assertion against the elements of an array/slice or keys of a map. +- `BeTemporally`: like `BeNumerically` but for `time.Time` +- `HaveKeyWithValue`: asserts a map has a given key with the given value. + +Updated Matchers: + +- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an objet that satisfies the passed-in matcher. +- Matchers that implement `MatchMayChangeInTheFuture(actual interface{}) bool` can inform `Eventually` and/or `Consistently` when a match has no chance of changing status in the future. 
For example, `Receive` returns `false` when a channel is closed. + +Misc: + +- Start using semantic versioning +- Start maintaining changelog + +Major refactor: + +- Pull out Gomega's internal to `internal` diff --git a/vendor/github.com/onsi/gomega/LICENSE b/vendor/github.com/onsi/gomega/LICENSE new file mode 100644 index 00000000000..9415ee72c17 --- /dev/null +++ b/vendor/github.com/onsi/gomega/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/onsi/gomega/README.md b/vendor/github.com/onsi/gomega/README.md new file mode 100644 index 00000000000..d1add5ba0c4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/README.md @@ -0,0 +1,21 @@ +![Gomega: Ginkgo's Preferred Matcher Library](http://onsi.github.io/gomega/images/gomega.png) + +[![Build Status](https://travis-ci.org/onsi/gomega.png)](https://travis-ci.org/onsi/gomega) + +Jump straight to the [docs](http://onsi.github.io/gomega/) to learn about Gomega, including a list of [all available matchers](http://onsi.github.io/gomega/#provided-matchers). + +To discuss Gomega and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega). + +## [Ginkgo](http://github.com/onsi/ginkgo): a BDD Testing Framework for Golang + +Learn more about Ginkgo [here](http://onsi.github.io/ginkgo/) + +## Community Matchers + +A collection of community matchers is available on the [wiki](https://github.com/onsi/gomega/wiki). + +## License + +Gomega is MIT-Licensed + +The `ConsistOf` matcher uses [goraph](https://github.com/amitkgupta/goraph) which is embedded in the source to simplify distribution. goraph has an MIT license. diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go new file mode 100644 index 00000000000..06355d945bc --- /dev/null +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -0,0 +1,277 @@ +/* +Gomega's format package pretty-prints objects. It explores input objects recursively and generates formatted, indented output with type information. +*/ +package format + +import ( + "fmt" + "reflect" + "strings" + "strconv" +) + +// Use MaxDepth to set the maximum recursion depth when printing deeply nested objects +var MaxDepth = uint(10) + +/* +By default, all objects (even those that implement fmt.Stringer and fmt.GoStringer) are recursively inspected to generate output. + +Set UseStringerRepresentation = true to use GoString (for fmt.GoStringers) or String (for fmt.Stringer) instead. 
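+
+For instance (an illustrative sketch, assuming a fmt.Stringer value such as time.Time):
+
+	t := time.Now()
+	format.Object(t, 1)                     // recursively formats t's fields with type information
+	format.UseStringerRepresentation = true
+	format.Object(t, 1)                     // now returns the result of t.String() instead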
+ +Note that GoString and String don't always have all the information you need to understand why a test failed! +*/ +var UseStringerRepresentation = false + +//The default indentation string emitted by the format package +var Indent = " " + +var longFormThreshold = 20 + +/* +Generates a formatted matcher success/failure message of the form: + + Expected + + + + +If expected is omited, then the message looks like: + + Expected + + +*/ +func Message(actual interface{}, message string, expected ...interface{}) string { + if len(expected) == 0 { + return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message) + } else { + return fmt.Sprintf("Expected\n%s\n%s\n%s", Object(actual, 1), message, Object(expected[0], 1)) + } +} + +/* +Pretty prints the passed in object at the passed in indentation level. + +Object recurses into deeply nested objects emitting pretty-printed representations of their components. + +Modify format.MaxDepth to control how deep the recursion is allowed to go +Set format.UseStringerRepresentation to true to return object.GoString() or object.String() when available instead of +recursing into the object. +*/ +func Object(object interface{}, indentation uint) string { + indent := strings.Repeat(Indent, int(indentation)) + value := reflect.ValueOf(object) + return fmt.Sprintf("%s<%s>: %s", indent, formatType(object), formatValue(value, indentation)) +} + +/* +IndentString takes a string and indents each line by the specified amount. +*/ +func IndentString(s string, indentation uint) string { + components := strings.Split(s, "\n") + result := "" + indent := strings.Repeat(Indent, int(indentation)) + for i, component := range components { + result += indent + component + if i < len(components)-1 { + result += "\n" + } + } + + return result +} + +func formatType(object interface{}) string { + t := reflect.TypeOf(object) + if t == nil { + return "nil" + } + switch t.Kind() { + case reflect.Chan: + v := reflect.ValueOf(object) + return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap()) + case reflect.Ptr: + return fmt.Sprintf("%T | %p", object, object) + case reflect.Slice: + v := reflect.ValueOf(object) + return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap()) + case reflect.Map: + v := reflect.ValueOf(object) + return fmt.Sprintf("%T | len:%d", object, v.Len()) + default: + return fmt.Sprintf("%T", object) + } +} + +func formatValue(value reflect.Value, indentation uint) string { + if indentation > MaxDepth { + return "..." 
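+		// Note: deeply self-referential values (e.g. a map that contains itself)
+		// bottom out here as "..." once the recursion exceeds MaxDepth (10 by default).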
+ } + + if isNilValue(value) { + return "nil" + } + + if UseStringerRepresentation { + if value.CanInterface() { + obj := value.Interface() + switch x := obj.(type) { + case fmt.GoStringer: + return x.GoString() + case fmt.Stringer: + return x.String() + } + } + } + + switch value.Kind() { + case reflect.Bool: + return fmt.Sprintf("%v", value.Bool()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return fmt.Sprintf("%v", value.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return fmt.Sprintf("%v", value.Uint()) + case reflect.Uintptr: + return fmt.Sprintf("0x%x", value.Uint()) + case reflect.Float32, reflect.Float64: + return fmt.Sprintf("%v", value.Float()) + case reflect.Complex64, reflect.Complex128: + return fmt.Sprintf("%v", value.Complex()) + case reflect.Chan: + return fmt.Sprintf("0x%x", value.Pointer()) + case reflect.Func: + return fmt.Sprintf("0x%x", value.Pointer()) + case reflect.Ptr: + return formatValue(value.Elem(), indentation) + case reflect.Slice: + return formatSlice(value, indentation) + case reflect.String: + return formatString(value.String(), indentation) + case reflect.Array: + return formatSlice(value, indentation) + case reflect.Map: + return formatMap(value, indentation) + case reflect.Struct: + return formatStruct(value, indentation) + case reflect.Interface: + return formatValue(value.Elem(), indentation) + default: + if value.CanInterface() { + return fmt.Sprintf("%#v", value.Interface()) + } else { + return fmt.Sprintf("%#v", value) + } + } +} + +func formatString(object interface{}, indentation uint) string { + if indentation == 1 { + s := fmt.Sprintf("%s", object) + components := strings.Split(s, "\n") + result := "" + for i, component := range components { + if i == 0 { + result += component + } else { + result += Indent + component + } + if i < len(components)-1 { + result += "\n" + } + } + + return fmt.Sprintf("%s", result) + } else { + return fmt.Sprintf("%q", object) + } +} + +func formatSlice(v reflect.Value, indentation uint) string { + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && isPrintableString(string(v.Bytes())){ + return formatString(v.Bytes(), indentation) + } + + l := v.Len() + result := make([]string, l) + longest := 0 + for i := 0; i < l; i++ { + result[i] = formatValue(v.Index(i), indentation+1) + if len(result[i]) > longest { + longest = len(result[i]) + } + } + + if longest > longFormThreshold { + indenter := strings.Repeat(Indent, int(indentation)) + return fmt.Sprintf("[\n%s%s,\n%s]", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) + } else { + return fmt.Sprintf("[%s]", strings.Join(result, ", ")) + } +} + +func formatMap(v reflect.Value, indentation uint) string { + l := v.Len() + result := make([]string, l) + + longest := 0 + for i, key := range v.MapKeys() { + value := v.MapIndex(key) + result[i] = fmt.Sprintf("%s: %s", formatValue(key, 0), formatValue(value, indentation+1)) + if len(result[i]) > longest { + longest = len(result[i]) + } + } + + if longest > longFormThreshold { + indenter := strings.Repeat(Indent, int(indentation)) + return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) + } else { + return fmt.Sprintf("{%s}", strings.Join(result, ", ")) + } +} + +func formatStruct(v reflect.Value, indentation uint) string { + t := v.Type() + + l := v.NumField() + result := []string{} + longest := 0 + for i := 0; i < l; i++ { + structField := 
t.Field(i) + fieldEntry := v.Field(i) + representation := fmt.Sprintf("%s: %s", structField.Name, formatValue(fieldEntry, indentation+1)) + result = append(result, representation) + if len(representation) > longest { + longest = len(representation) + } + } + if longest > longFormThreshold { + indenter := strings.Repeat(Indent, int(indentation)) + return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) + } else { + return fmt.Sprintf("{%s}", strings.Join(result, ", ")) + } +} + +func isNilValue(a reflect.Value) bool { + switch a.Kind() { + case reflect.Invalid: + return true + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return a.IsNil() + } + + return false +} + +/* +Returns true when the string is entirely made of printable runes, false otherwise. +*/ +func isPrintableString(str string) bool { + for _, runeValue := range str { + if !strconv.IsPrint(runeValue) { + return false + } + } + return true +} diff --git a/vendor/github.com/onsi/gomega/format/format_suite_test.go b/vendor/github.com/onsi/gomega/format/format_suite_test.go new file mode 100644 index 00000000000..8e65a95292d --- /dev/null +++ b/vendor/github.com/onsi/gomega/format/format_suite_test.go @@ -0,0 +1,13 @@ +package format_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestFormat(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Format Suite") +} diff --git a/vendor/github.com/onsi/gomega/format/format_test.go b/vendor/github.com/onsi/gomega/format/format_test.go new file mode 100644 index 00000000000..1391fe7c5f2 --- /dev/null +++ b/vendor/github.com/onsi/gomega/format/format_test.go @@ -0,0 +1,455 @@ +package format_test + +import ( + "fmt" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/format" + "github.com/onsi/gomega/types" + "strings" +) + +//recursive struct + +type StringAlias string +type ByteAlias []byte +type IntAlias int + +type AStruct struct { + Exported string +} + +type SimpleStruct struct { + Name string + Enumeration int + Veritas bool + Data []byte + secret uint32 +} + +type ComplexStruct struct { + Strings []string + SimpleThings []*SimpleStruct + DataMaps map[int]ByteAlias +} + +type SecretiveStruct struct { + boolValue bool + intValue int + uintValue uint + uintptrValue uintptr + floatValue float32 + complexValue complex64 + chanValue chan bool + funcValue func() + pointerValue *int + sliceValue []string + byteSliceValue []byte + stringValue string + arrValue [3]int + byteArrValue [3]byte + mapValue map[string]int + structValue AStruct + interfaceValue interface{} +} + +type GoStringer struct { +} + +func (g GoStringer) GoString() string { + return "go-string" +} + +func (g GoStringer) String() string { + return "string" +} + +type Stringer struct { +} + +func (g Stringer) String() string { + return "string" +} + +var _ = Describe("Format", func() { + match := func(typeRepresentation string, valueRepresentation string, args ...interface{}) types.GomegaMatcher { + if len(args) > 0 { + valueRepresentation = fmt.Sprintf(valueRepresentation, args...) + } + return Equal(fmt.Sprintf("%s<%s>: %s", Indent, typeRepresentation, valueRepresentation)) + } + + matchRegexp := func(typeRepresentation string, valueRepresentation string, args ...interface{}) types.GomegaMatcher { + if len(args) > 0 { + valueRepresentation = fmt.Sprintf(valueRepresentation, args...) 
+ } + return MatchRegexp(fmt.Sprintf("%s<%s>: %s", Indent, typeRepresentation, valueRepresentation)) + } + + hashMatchingRegexp := func(entries ...string) string { + entriesSwitch := "(" + strings.Join(entries, "|") + ")" + arr := make([]string, len(entries)) + for i := range arr { + arr[i] = entriesSwitch + } + return "{" + strings.Join(arr, ", ") + "}" + } + + Describe("Message", func() { + Context("with only an actual value", func() { + It("should print out an indented formatted representation of the value and the message", func() { + Ω(Message(3, "to be three.")).Should(Equal("Expected\n : 3\nto be three.")) + }) + }) + + Context("with an actual and an expected value", func() { + It("should print out an indented formatted representatino of both values, and the message", func() { + Ω(Message(3, "to equal", 4)).Should(Equal("Expected\n : 3\nto equal\n : 4")) + }) + }) + }) + + Describe("IndentString", func() { + It("should indent the string", func() { + Ω(IndentString("foo\n bar\nbaz", 2)).Should(Equal(" foo\n bar\n baz")) + }) + }) + + Describe("Object", func() { + Describe("formatting boolean values", func() { + It("should give the type and format values correctly", func() { + Ω(Object(true, 1)).Should(match("bool", "true")) + Ω(Object(false, 1)).Should(match("bool", "false")) + }) + }) + + Describe("formatting numbers", func() { + It("should give the type and format values correctly", func() { + Ω(Object(int(3), 1)).Should(match("int", "3")) + Ω(Object(int8(3), 1)).Should(match("int8", "3")) + Ω(Object(int16(3), 1)).Should(match("int16", "3")) + Ω(Object(int32(3), 1)).Should(match("int32", "3")) + Ω(Object(int64(3), 1)).Should(match("int64", "3")) + + Ω(Object(uint(3), 1)).Should(match("uint", "3")) + Ω(Object(uint8(3), 1)).Should(match("uint8", "3")) + Ω(Object(uint16(3), 1)).Should(match("uint16", "3")) + Ω(Object(uint32(3), 1)).Should(match("uint32", "3")) + Ω(Object(uint64(3), 1)).Should(match("uint64", "3")) + }) + + It("should handle uintptr differently", func() { + Ω(Object(uintptr(3), 1)).Should(match("uintptr", "0x3")) + }) + }) + + Describe("formatting channels", func() { + It("should give the type and format values correctly", func() { + c := make(chan<- bool, 3) + c <- true + c <- false + Ω(Object(c, 1)).Should(match("chan<- bool | len:2, cap:3", "%v", c)) + }) + }) + + Describe("formatting strings", func() { + It("should give the type and format values correctly", func() { + s := "a\nb\nc" + Ω(Object(s, 1)).Should(match("string", `a + b + c`)) + }) + }) + + Describe("formatting []byte slices", func() { + Context("when the slice is made of printable bytes", func () { + It("should present it as string", func() { + b := []byte("a b c") + Ω(Object(b, 1)).Should(matchRegexp(`\[\]uint8 \| len:5, cap:\d+`, `a b c`)) + }) + }) + Context("when the slice contains non-printable bytes", func () { + It("should present it as slice", func() { + b := []byte("a b c\n\x01\x02\x03\xff\x1bH") + Ω(Object(b, 1)).Should(matchRegexp(`\[\]uint8 \| len:12, cap:\d+`, `\[97, 32, 98, 32, 99, 10, 1, 2, 3, 255, 27, 72\]`)) + }) + }) + }) + + Describe("formatting functions", func() { + It("should give the type and format values correctly", func() { + f := func(a string, b []int) ([]byte, error) { + return []byte("abc"), nil + } + Ω(Object(f, 1)).Should(match("func(string, []int) ([]uint8, error)", "%v", f)) + }) + }) + + Describe("formatting pointers", func() { + It("should give the type and dereference the value to format it correctly", func() { + a := 3 + Ω(Object(&a, 
1)).Should(match(fmt.Sprintf("*int | %p", &a), "3")) + }) + + Context("when there are pointers to pointers...", func() { + It("should recursively deference the pointer until it gets to a value", func() { + a := 3 + var b *int + var c **int + var d ***int + b = &a + c = &b + d = &c + + Ω(Object(d, 1)).Should(match(fmt.Sprintf("***int | %p", d), "3")) + }) + }) + + Context("when the pointer points to nil", func() { + It("should say nil and not explode", func() { + var a *AStruct + Ω(Object(a, 1)).Should(match("*format_test.AStruct | 0x0", "nil")) + }) + }) + }) + + Describe("formatting arrays", func() { + It("should give the type and format values correctly", func() { + w := [3]string{"Jed Bartlet", "Toby Ziegler", "CJ Cregg"} + Ω(Object(w, 1)).Should(match("[3]string", `["Jed Bartlet", "Toby Ziegler", "CJ Cregg"]`)) + }) + + Context("with byte arrays", func() { + It("should give the type and format values correctly", func() { + w := [3]byte{17, 28, 19} + Ω(Object(w, 1)).Should(match("[3]uint8", `[17, 28, 19]`)) + }) + }) + }) + + Describe("formatting slices", func() { + It("should include the length and capacity in the type information", func() { + s := make([]bool, 3, 4) + Ω(Object(s, 1)).Should(match("[]bool | len:3, cap:4", "[false, false, false]")) + }) + + Context("when the slice contains long entries", func() { + It("should format the entries with newlines", func() { + w := []string{"Josiah Edward Bartlet", "Toby Ziegler", "CJ Cregg"} + expected := `[ + "Josiah Edward Bartlet", + "Toby Ziegler", + "CJ Cregg", + ]` + Ω(Object(w, 1)).Should(match("[]string | len:3, cap:3", expected)) + }) + }) + }) + + Describe("formatting maps", func() { + It("should include the length in the type information", func() { + m := make(map[int]bool, 5) + m[3] = true + m[4] = false + Ω(Object(m, 1)).Should(matchRegexp(`map\[int\]bool \| len:2`, hashMatchingRegexp("3: true", "4: false"))) + }) + + Context("when the slice contains long entries", func() { + It("should format the entries with newlines", func() { + m := map[string][]byte{} + m["Josiah Edward Bartlet"] = []byte("Martin Sheen") + m["Toby Ziegler"] = []byte("Richard Schiff") + m["CJ Cregg"] = []byte("Allison Janney") + expected := `{ + ("Josiah Edward Bartlet": "Martin Sheen"|"Toby Ziegler": "Richard Schiff"|"CJ Cregg": "Allison Janney"), + ("Josiah Edward Bartlet": "Martin Sheen"|"Toby Ziegler": "Richard Schiff"|"CJ Cregg": "Allison Janney"), + ("Josiah Edward Bartlet": "Martin Sheen"|"Toby Ziegler": "Richard Schiff"|"CJ Cregg": "Allison Janney"), + }` + Ω(Object(m, 1)).Should(matchRegexp(`map\[string\]\[\]uint8 \| len:3`, expected)) + }) + }) + }) + + Describe("formatting structs", func() { + It("should include the struct name and the field names", func() { + s := SimpleStruct{ + Name: "Oswald", + Enumeration: 17, + Veritas: true, + Data: []byte("datum"), + secret: 1983, + } + + Ω(Object(s, 1)).Should(match("format_test.SimpleStruct", `{Name: "Oswald", Enumeration: 17, Veritas: true, Data: "datum", secret: 1983}`)) + }) + + Context("when the struct contains long entries", func() { + It("should format the entries with new lines", func() { + s := &SimpleStruct{ + Name: "Mithrandir Gandalf Greyhame", + Enumeration: 2021, + Veritas: true, + Data: []byte("wizard"), + secret: 3, + } + + Ω(Object(s, 1)).Should(match(fmt.Sprintf("*format_test.SimpleStruct | %p", s), `{ + Name: "Mithrandir Gandalf Greyhame", + Enumeration: 2021, + Veritas: true, + Data: "wizard", + secret: 3, + }`)) + }) + }) + }) + + Describe("formatting nil values", func() { + 
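+			// Covers untyped nil plus nil pointer, channel, slice and map values,
+			// all of which isNilValue detects and Object renders as "nil".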
It("should print out nil", func() { + Ω(Object(nil, 1)).Should(match("nil", "nil")) + var typedNil *AStruct + Ω(Object(typedNil, 1)).Should(match("*format_test.AStruct | 0x0", "nil")) + var c chan<- bool + Ω(Object(c, 1)).Should(match("chan<- bool | len:0, cap:0", "nil")) + var s []string + Ω(Object(s, 1)).Should(match("[]string | len:0, cap:0", "nil")) + var m map[string]bool + Ω(Object(m, 1)).Should(match("map[string]bool | len:0", "nil")) + }) + }) + + Describe("formatting aliased types", func() { + It("should print out the correct alias type", func() { + Ω(Object(StringAlias("alias"), 1)).Should(match("format_test.StringAlias", `alias`)) + Ω(Object(ByteAlias("alias"), 1)).Should(matchRegexp(`format_test\.ByteAlias \| len:5, cap:\d+`, `alias`)) + Ω(Object(IntAlias(3), 1)).Should(match("format_test.IntAlias", "3")) + }) + }) + + Describe("handling nested things", func() { + It("should produce a correctly nested representation", func() { + s := ComplexStruct{ + Strings: []string{"lots", "of", "short", "strings"}, + SimpleThings: []*SimpleStruct{ + {"short", 7, true, []byte("succinct"), 17}, + {"something longer", 427, true, []byte("designed to wrap around nicely"), 30}, + }, + DataMaps: map[int]ByteAlias{ + 17: ByteAlias("some substantially longer chunks of data"), + 1138: ByteAlias("that should make things wrap"), + }, + } + expected := `{ + Strings: \["lots", "of", "short", "strings"\], + SimpleThings: \[ + {Name: "short", Enumeration: 7, Veritas: true, Data: "succinct", secret: 17}, + { + Name: "something longer", + Enumeration: 427, + Veritas: true, + Data: "designed to wrap around nicely", + secret: 30, + }, + \], + DataMaps: { + (17: "some substantially longer chunks of data"|1138: "that should make things wrap"), + (17: "some substantially longer chunks of data"|1138: "that should make things wrap"), + }, + }` + Ω(Object(s, 1)).Should(matchRegexp(`format_test\.ComplexStruct`, expected)) + }) + }) + }) + + Describe("Handling unexported fields in structs", func() { + It("should handle all the various types correctly", func() { + a := int(5) + s := SecretiveStruct{ + boolValue: true, + intValue: 3, + uintValue: 4, + uintptrValue: 5, + floatValue: 6.0, + complexValue: complex(5.0, 3.0), + chanValue: make(chan bool, 2), + funcValue: func() {}, + pointerValue: &a, + sliceValue: []string{"string", "slice"}, + byteSliceValue: []byte("bytes"), + stringValue: "a string", + arrValue: [3]int{11, 12, 13}, + byteArrValue: [3]byte{17, 20, 32}, + mapValue: map[string]int{"a key": 20, "b key": 30}, + structValue: AStruct{"exported"}, + interfaceValue: map[string]int{"a key": 17}, + } + + expected := fmt.Sprintf(`{ + boolValue: true, + intValue: 3, + uintValue: 4, + uintptrValue: 0x5, + floatValue: 6, + complexValue: \(5\+3i\), + chanValue: %p, + funcValue: %p, + pointerValue: 5, + sliceValue: \["string", "slice"\], + byteSliceValue: "bytes", + stringValue: "a string", + arrValue: \[11, 12, 13\], + byteArrValue: \[17, 20, 32\], + mapValue: %s, + structValue: {Exported: "exported"}, + interfaceValue: {"a key": 17}, + }`, s.chanValue, s.funcValue, hashMatchingRegexp(`"a key": 20`, `"b key": 30`)) + + Ω(Object(s, 1)).Should(matchRegexp(`format_test\.SecretiveStruct`, expected)) + }) + }) + + Describe("Handling interfaces", func() { + It("should unpack the interface", func() { + outerHash := map[string]interface{}{} + innerHash := map[string]int{} + + innerHash["inner"] = 3 + outerHash["integer"] = 2 + outerHash["map"] = innerHash + + expected := hashMatchingRegexp(`"integer": 2`, `"map": {"inner": 3}`) 
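+			// Map iteration order is not deterministic, so the expected output is a
+			// regular expression that accepts the two entries in either order.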
+ Ω(Object(outerHash, 1)).Should(matchRegexp(`map\[string\]interface {} \| len:2`, expected)) + }) + }) + + Describe("Handling recursive things", func() { + It("should not go crazy...", func() { + m := map[string]interface{}{} + m["integer"] = 2 + m["map"] = m + Ω(Object(m, 1)).Should(ContainSubstring("...")) + }) + }) + + Describe("When instructed to use the Stringer representation", func() { + BeforeEach(func() { + UseStringerRepresentation = true + }) + + AfterEach(func() { + UseStringerRepresentation = false + }) + + Context("when passed a GoStringer", func() { + It("should use what GoString() returns", func() { + Ω(Object(GoStringer{}, 1)).Should(ContainSubstring(": go-string")) + }) + }) + + Context("when passed a stringer", func() { + It("should use what String() returns", func() { + Ω(Object(Stringer{}, 1)).Should(ContainSubstring(": string")) + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go new file mode 100644 index 00000000000..78bd188c072 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -0,0 +1,335 @@ +/* +Gomega is the Ginkgo BDD-style testing framework's preferred matcher library. + +The godoc documentation describes Gomega's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/gomega/ + +Gomega on Github: http://github.com/onsi/gomega + +Learn more about Ginkgo online: http://onsi.github.io/ginkgo + +Ginkgo on Github: http://github.com/onsi/ginkgo + +Gomega is MIT-Licensed +*/ +package gomega + +import ( + "fmt" + "reflect" + "time" + + "github.com/onsi/gomega/internal/assertion" + "github.com/onsi/gomega/internal/asyncassertion" + "github.com/onsi/gomega/internal/testingtsupport" + "github.com/onsi/gomega/types" +) + +const GOMEGA_VERSION = "1.0" + +const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil. +If you're using Ginkgo then you probably forgot to put your assertion in an It(). +Alternatively, you may have forgotten to register a fail handler with RegisterFailHandler() or RegisterTestingT(). +` + +var globalFailHandler types.GomegaFailHandler + +var defaultEventuallyTimeout = time.Second +var defaultEventuallyPollingInterval = 10 * time.Millisecond +var defaultConsistentlyDuration = 100 * time.Millisecond +var defaultConsistentlyPollingInterval = 10 * time.Millisecond + +//RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails +//the fail handler passed into RegisterFailHandler is called. +func RegisterFailHandler(handler types.GomegaFailHandler) { + globalFailHandler = handler +} + +//RegisterTestingT connects Gomega to Golang's XUnit style +//Testing.T tests. You'll need to call this at the top of each XUnit style test: +// +// func TestFarmHasCow(t *testing.T) { +// RegisterTestingT(t) +// +// f := farm.New([]string{"Cow", "Horse"}) +// Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") +// } +// +// Note that this *testing.T is registered *globally* by Gomega (this is why you don't have to +// pass `t` down to the matcher itself). This means that you cannot run the XUnit style tests +// in parallel as the global fail handler cannot point to more than one testing.T at a time. +// +// (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*). 
+func RegisterTestingT(t types.GomegaTestingT) { + RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailHandler(t)) +} + +//InterceptGomegaHandlers runs a given callback and returns an array of +//failure messages generated by any Gomega assertions within the callback. +// +//This is accomplished by temporarily replacing the *global* fail handler +//with a fail handler that simply annotates failures. The original fail handler +//is reset when InterceptGomegaFailures returns. +// +//This is most useful when testing custom matchers, but can also be used to check +//on a value using a Gomega assertion without causing a test failure. +func InterceptGomegaFailures(f func()) []string { + originalHandler := globalFailHandler + failures := []string{} + RegisterFailHandler(func(message string, callerSkip ...int) { + failures = append(failures, message) + }) + f() + RegisterFailHandler(originalHandler) + return failures +} + +//Ω wraps an actual value allowing assertions to be made on it: +// Ω("foo").Should(Equal("foo")) +// +//If Ω is passed more than one argument it will pass the *first* argument to the matcher. +//All subsequent arguments will be required to be nil/zero. +// +//This is convenient if you want to make an assertion on a method/function that returns +//a value and an error - a common patter in Go. +// +//For example, given a function with signature: +// func MyAmazingThing() (int, error) +// +//Then: +// Ω(MyAmazingThing()).Should(Equal(3)) +//Will succeed only if `MyAmazingThing()` returns `(3, nil)` +// +//Ω and Expect are identical +func Ω(actual interface{}, extra ...interface{}) GomegaAssertion { + return ExpectWithOffset(0, actual, extra...) +} + +//Expect wraps an actual value allowing assertions to be made on it: +// Expect("foo").To(Equal("foo")) +// +//If Expect is passed more than one argument it will pass the *first* argument to the matcher. +//All subsequent arguments will be required to be nil/zero. +// +//This is convenient if you want to make an assertion on a method/function that returns +//a value and an error - a common patter in Go. +// +//For example, given a function with signature: +// func MyAmazingThing() (int, error) +// +//Then: +// Expect(MyAmazingThing()).Should(Equal(3)) +//Will succeed only if `MyAmazingThing()` returns `(3, nil)` +// +//Expect and Ω are identical +func Expect(actual interface{}, extra ...interface{}) GomegaAssertion { + return ExpectWithOffset(0, actual, extra...) +} + +//ExpectWithOffset wraps an actual value allowing assertions to be made on it: +// ExpectWithOffset(1, "foo").To(Equal("foo")) +// +//Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument +//this is used to modify the call-stack offset when computing line numbers. +// +//This is most useful in helper functions that make assertions. If you want Gomega's +//error message to refer to the calling line in the test (as opposed to the line in the helper function) +//set the first argument of `ExpectWithOffset` appropriately. +func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) GomegaAssertion { + if globalFailHandler == nil { + panic(nilFailHandlerPanic) + } + return assertion.New(actual, globalFailHandler, offset, extra...) +} + +//Eventually wraps an actual value allowing assertions to be made on it. +//The assertion is tried periodically until it passes or a timeout occurs. 
+// +//Both the timeout and polling interval are configurable as optional arguments: +//The first optional argument is the timeout +//The second optional argument is the polling interval +// +//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the +//last case they are interpreted as seconds. +// +//If Eventually is passed an actual that is a function taking no arguments and returning at least one value, +//then Eventually will call the function periodically and try the matcher against the function's first return value. +// +//Example: +// +// Eventually(func() int { +// return thingImPolling.Count() +// }).Should(BeNumerically(">=", 17)) +// +//Note that this example could be rewritten: +// +// Eventually(thingImPolling.Count).Should(BeNumerically(">=", 17)) +// +//If the function returns more than one value, then Eventually will pass the first value to the matcher and +//assert that all other values are nil/zero. +//This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go. +// +//For example, consider a method that returns a value and an error: +// func FetchFromDB() (string, error) +// +//Then +// Eventually(FetchFromDB).Should(Equal("hasselhoff")) +// +//Will pass only if the the returned error is nil and the returned string passes the matcher. +// +//Eventually's default timeout is 1 second, and its default polling interval is 10ms +func Eventually(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { + return EventuallyWithOffset(0, actual, intervals...) +} + +//EventuallyWithOffset operates like Eventually but takes an additional +//initial argument to indicate an offset in the call stack. This is useful when building helper +//functions that contain matchers. To learn more, read about `ExpectWithOffset`. +func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { + if globalFailHandler == nil { + panic(nilFailHandlerPanic) + } + timeoutInterval := defaultEventuallyTimeout + pollingInterval := defaultEventuallyPollingInterval + if len(intervals) > 0 { + timeoutInterval = toDuration(intervals[0]) + } + if len(intervals) > 1 { + pollingInterval = toDuration(intervals[1]) + } + return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, globalFailHandler, timeoutInterval, pollingInterval, offset) +} + +//Consistently wraps an actual value allowing assertions to be made on it. +//The assertion is tried periodically and is required to pass for a period of time. +// +//Both the total time and polling interval are configurable as optional arguments: +//The first optional argument is the duration that Consistently will run for +//The second optional argument is the polling interval +// +//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the +//last case they are interpreted as seconds. +// +//If Consistently is passed an actual that is a function taking no arguments and returning at least one value, +//then Consistently will call the function periodically and try the matcher against the function's first return value. +// +//If the function returns more than one value, then Consistently will pass the first value to the matcher and +//assert that all other values are nil/zero. +//This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go. 
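+//
+//For example (an illustrative sketch, assuming a hypothetical queue with a Depth() (int, error) method):
+//
+//	Consistently(queue.Depth).Should(Equal(0))
+//
+//Here the matcher only ever sees the int; a non-nil error fails the assertion. The two optional
+//intervals can also be given explicitly, e.g. Consistently(queue.Depth, "500ms", "50ms") or
+//Consistently(queue.Depth, 2, 0.1), with bare numbers interpreted as seconds.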
+// +//Consistently is useful in cases where you want to assert that something *does not happen* over a period of tiem. +//For example, you want to assert that a goroutine does *not* send data down a channel. In this case, you could: +// +// Consistently(channel).ShouldNot(Receive()) +// +//Consistently's default duration is 100ms, and its default polling interval is 10ms +func Consistently(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { + return ConsistentlyWithOffset(0, actual, intervals...) +} + +//ConsistentlyWithOffset operates like Consistnetly but takes an additional +//initial argument to indicate an offset in the call stack. This is useful when building helper +//functions that contain matchers. To learn more, read about `ExpectWithOffset`. +func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { + if globalFailHandler == nil { + panic(nilFailHandlerPanic) + } + timeoutInterval := defaultConsistentlyDuration + pollingInterval := defaultConsistentlyPollingInterval + if len(intervals) > 0 { + timeoutInterval = toDuration(intervals[0]) + } + if len(intervals) > 1 { + pollingInterval = toDuration(intervals[1]) + } + return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, globalFailHandler, timeoutInterval, pollingInterval, offset) +} + +//Set the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses. +func SetDefaultEventuallyTimeout(t time.Duration) { + defaultEventuallyTimeout = t +} + +//Set the default polling interval for Eventually. +func SetDefaultEventuallyPollingInterval(t time.Duration) { + defaultEventuallyPollingInterval = t +} + +//Set the default duration for Consistently. Consistently will verify that your condition is satsified for this long. +func SetDefaultConsistentlyDuration(t time.Duration) { + defaultConsistentlyDuration = t +} + +//Set the default polling interval for Consistently. +func SetDefaultConsistentlyPollingInterval(t time.Duration) { + defaultConsistentlyPollingInterval = t +} + +//GomegaAsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against +//the matcher passed to the Should and ShouldNot methods. +// +//Both Should and ShouldNot take a variadic optionalDescription argument. This is passed on to +//fmt.Sprintf() and is used to annotate failure messages. This allows you to make your failure messages more +//descriptive +// +//Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed. +// +//Example: +// +// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.") +// Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.") +type GomegaAsyncAssertion interface { + Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool +} + +//GomegaAssertion is returned by Ω and Expect and compares the actual value to the matcher +//passed to the Should/ShouldNot and To/ToNot/NotTo methods. +// +//Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect +//though this is not enforced. +// +//All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf() +//and is used to annotate failure messages. 
+// +//All methods return a bool that is true if hte assertion passed and false if it failed. +// +//Example: +// +// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm) +type GomegaAssertion interface { + Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + + To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool +} + +//OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it +type OmegaMatcher types.GomegaMatcher + +func toDuration(input interface{}) time.Duration { + duration, ok := input.(time.Duration) + if ok { + return duration + } + + value := reflect.ValueOf(input) + kind := reflect.TypeOf(input).Kind() + + if reflect.Int <= kind && kind <= reflect.Int64 { + return time.Duration(value.Int()) * time.Second + } else if reflect.Uint <= kind && kind <= reflect.Uint64 { + return time.Duration(value.Uint()) * time.Second + } else if reflect.Float32 <= kind && kind <= reflect.Float64 { + return time.Duration(value.Float() * float64(time.Second)) + } else if reflect.String == kind { + duration, err := time.ParseDuration(value.String()) + if err != nil { + panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input)) + } + return duration + } + + panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input)) +} diff --git a/vendor/github.com/onsi/gomega/internal/assertion/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go new file mode 100644 index 00000000000..b73673f21ed --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go @@ -0,0 +1,98 @@ +package assertion + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/types" +) + +type Assertion struct { + actualInput interface{} + fail types.GomegaFailHandler + offset int + extra []interface{} +} + +func New(actualInput interface{}, fail types.GomegaFailHandler, offset int, extra ...interface{}) *Assertion { + return &Assertion{ + actualInput: actualInput, + fail: fail, + offset: offset, + extra: extra, + } +} + +func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) 
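+	// (To, ToNot and NotTo are aliases for Should and ShouldNot: every entry point
+	// vets the extra arguments and then runs match() with the desired polarity.)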
+} + +func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string { + switch len(optionalDescription) { + case 0: + return "" + default: + return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" + } +} + +func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { + matches, err := matcher.Match(assertion.actualInput) + description := assertion.buildDescription(optionalDescription...) + if err != nil { + assertion.fail(description+err.Error(), 2+assertion.offset) + return false + } + if matches != desiredMatch { + var message string + if desiredMatch { + message = matcher.FailureMessage(assertion.actualInput) + } else { + message = matcher.NegatedFailureMessage(assertion.actualInput) + } + assertion.fail(description+message, 2+assertion.offset) + return false + } + + return true +} + +func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool { + success, message := vetExtras(assertion.extra) + if success { + return true + } + + description := assertion.buildDescription(optionalDescription...) + assertion.fail(description+message, 2+assertion.offset) + return false +} + +func vetExtras(extras []interface{}) (bool, string) { + for i, extra := range extras { + if extra != nil { + zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface() + if !reflect.DeepEqual(zeroValue, extra) { + message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra) + return false, message + } + } + } + return true, "" +} diff --git a/vendor/github.com/onsi/gomega/internal/assertion/assertion_suite_test.go b/vendor/github.com/onsi/gomega/internal/assertion/assertion_suite_test.go new file mode 100644 index 00000000000..dae47a48bf9 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/assertion/assertion_suite_test.go @@ -0,0 +1,13 @@ +package assertion_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestAssertion(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Assertion Suite") +} diff --git a/vendor/github.com/onsi/gomega/internal/assertion/assertion_test.go b/vendor/github.com/onsi/gomega/internal/assertion/assertion_test.go new file mode 100644 index 00000000000..c03b7a320ce --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/assertion/assertion_test.go @@ -0,0 +1,252 @@ +package assertion_test + +import ( + "errors" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/internal/assertion" + "github.com/onsi/gomega/internal/fakematcher" +) + +var _ = Describe("Assertion", func() { + var ( + a *Assertion + failureMessage string + failureCallerSkip int + matcher *fakematcher.FakeMatcher + ) + + input := "The thing I'm testing" + + var fakeFailHandler = func(message string, callerSkip ...int) { + failureMessage = message + if len(callerSkip) == 1 { + failureCallerSkip = callerSkip[0] + } + } + + BeforeEach(func() { + matcher = &fakematcher.FakeMatcher{} + failureMessage = "" + failureCallerSkip = 0 + a = New(input, fakeFailHandler, 1) + }) + + Context("when called", func() { + It("should pass the provided input value to the matcher", func() { + a.Should(matcher) + + Ω(matcher.ReceivedActual).Should(Equal(input)) + matcher.ReceivedActual = "" + + a.ShouldNot(matcher) + + Ω(matcher.ReceivedActual).Should(Equal(input)) + matcher.ReceivedActual = "" + + a.To(matcher) + + Ω(matcher.ReceivedActual).Should(Equal(input)) + matcher.ReceivedActual = "" + + a.ToNot(matcher) + + Ω(matcher.ReceivedActual).Should(Equal(input)) + matcher.ReceivedActual = "" + + a.NotTo(matcher) + + Ω(matcher.ReceivedActual).Should(Equal(input)) + }) + }) + + Context("when the matcher succeeds", func() { + BeforeEach(func() { + matcher.MatchesToReturn = true + matcher.ErrToReturn = nil + }) + + Context("and a positive assertion is being made", func() { + It("should not call the failure callback", func() { + a.Should(matcher) + Ω(failureMessage).Should(Equal("")) + }) + + It("should be true", func() { + Ω(a.Should(matcher)).Should(BeTrue()) + }) + }) + + Context("and a negative assertion is being made", func() { + It("should call the failure callback", func() { + a.ShouldNot(matcher) + Ω(failureMessage).Should(Equal("negative: The thing I'm testing")) + Ω(failureCallerSkip).Should(Equal(3)) + }) + + It("should be false", func() { + Ω(a.ShouldNot(matcher)).Should(BeFalse()) + }) + }) + }) + + Context("when the matcher fails", func() { + BeforeEach(func() { + matcher.MatchesToReturn = false + matcher.ErrToReturn = nil + }) + + Context("and a positive assertion is being made", func() { + It("should call the failure callback", func() { + a.Should(matcher) + Ω(failureMessage).Should(Equal("positive: The thing I'm testing")) + Ω(failureCallerSkip).Should(Equal(3)) + }) + + It("should be false", func() { + Ω(a.Should(matcher)).Should(BeFalse()) + }) + }) + + Context("and a negative assertion is being made", func() { + It("should not call the failure callback", func() { + a.ShouldNot(matcher) + Ω(failureMessage).Should(Equal("")) + }) + + It("should be true", func() { + Ω(a.ShouldNot(matcher)).Should(BeTrue()) + }) + }) + }) + + Context("When reporting a failure", func() { + BeforeEach(func() { + matcher.MatchesToReturn = false + matcher.ErrToReturn = nil + }) + + Context("and there is an optional description", func() { + It("should append the description to the failure message", func() { + a.Should(matcher, "A description") + Ω(failureMessage).Should(Equal("A description\npositive: The thing I'm testing")) + Ω(failureCallerSkip).Should(Equal(3)) + }) + }) + + Context("and there are multiple arguments to the optional description", func() { + It("should append the formatted description to the failure message", func() { + a.Should(matcher, "A description of [%d]", 3) + Ω(failureMessage).Should(Equal("A description of [3]\npositive: The thing I'm testing")) + Ω(failureCallerSkip).Should(Equal(3)) + }) + }) + }) + + Context("When the matcher returns an error", func() { + 
BeforeEach(func() { + matcher.ErrToReturn = errors.New("Kaboom!") + }) + + Context("and a positive assertion is being made", func() { + It("should call the failure callback", func() { + matcher.MatchesToReturn = true + a.Should(matcher) + Ω(failureMessage).Should(Equal("Kaboom!")) + Ω(failureCallerSkip).Should(Equal(3)) + }) + }) + + Context("and a negative assertion is being made", func() { + It("should call the failure callback", func() { + matcher.MatchesToReturn = false + a.ShouldNot(matcher) + Ω(failureMessage).Should(Equal("Kaboom!")) + Ω(failureCallerSkip).Should(Equal(3)) + }) + }) + + It("should always be false", func() { + Ω(a.Should(matcher)).Should(BeFalse()) + Ω(a.ShouldNot(matcher)).Should(BeFalse()) + }) + }) + + Context("when there are extra parameters", func() { + It("(a simple example)", func() { + Ω(func() (string, int, error) { + return "foo", 0, nil + }()).Should(Equal("foo")) + }) + + Context("when the parameters are all nil or zero", func() { + It("should invoke the matcher", func() { + matcher.MatchesToReturn = true + matcher.ErrToReturn = nil + + var typedNil []string + a = New(input, fakeFailHandler, 1, 0, nil, typedNil) + + result := a.Should(matcher) + Ω(result).Should(BeTrue()) + Ω(matcher.ReceivedActual).Should(Equal(input)) + + Ω(failureMessage).Should(BeZero()) + }) + }) + + Context("when any of the parameters are not nil or zero", func() { + It("should call the failure callback", func() { + matcher.MatchesToReturn = false + matcher.ErrToReturn = nil + + a = New(input, fakeFailHandler, 1, errors.New("foo")) + result := a.Should(matcher) + Ω(result).Should(BeFalse()) + Ω(matcher.ReceivedActual).Should(BeZero(), "The matcher doesn't even get called") + Ω(failureMessage).Should(ContainSubstring("foo")) + failureMessage = "" + + a = New(input, fakeFailHandler, 1, nil, 1) + result = a.ShouldNot(matcher) + Ω(result).Should(BeFalse()) + Ω(failureMessage).Should(ContainSubstring("1")) + failureMessage = "" + + a = New(input, fakeFailHandler, 1, nil, 0, []string{"foo"}) + result = a.To(matcher) + Ω(result).Should(BeFalse()) + Ω(failureMessage).Should(ContainSubstring("foo")) + failureMessage = "" + + a = New(input, fakeFailHandler, 1, nil, 0, []string{"foo"}) + result = a.ToNot(matcher) + Ω(result).Should(BeFalse()) + Ω(failureMessage).Should(ContainSubstring("foo")) + failureMessage = "" + + a = New(input, fakeFailHandler, 1, nil, 0, []string{"foo"}) + result = a.NotTo(matcher) + Ω(result).Should(BeFalse()) + Ω(failureMessage).Should(ContainSubstring("foo")) + Ω(failureCallerSkip).Should(Equal(3)) + }) + }) + }) + + Context("Making an assertion without a registered fail handler", func() { + It("should panic", func() { + defer func() { + e := recover() + RegisterFailHandler(Fail) + if e == nil { + Fail("expected a panic to have occurred") + } + }() + + RegisterFailHandler(nil) + Ω(true).Should(BeTrue()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go new file mode 100644 index 00000000000..bce0853006a --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go @@ -0,0 +1,189 @@ +package asyncassertion + +import ( + "errors" + "fmt" + "reflect" + "time" + + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type AsyncAssertionType uint + +const ( + AsyncAssertionTypeEventually AsyncAssertionType = iota + AsyncAssertionTypeConsistently +) + +type AsyncAssertion struct { + 
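+	// Shared machinery behind Eventually and Consistently: the actual input (invoked
+	// first when it is a zero-argument function) is re-evaluated every pollingInterval
+	// and run through the matcher until timeoutInterval elapses or the outcome is settled.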
asyncType AsyncAssertionType + actualInput interface{} + timeoutInterval time.Duration + pollingInterval time.Duration + fail types.GomegaFailHandler + offset int +} + +func New(asyncType AsyncAssertionType, actualInput interface{}, fail types.GomegaFailHandler, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion { + actualType := reflect.TypeOf(actualInput) + if actualType.Kind() == reflect.Func { + if actualType.NumIn() != 0 || actualType.NumOut() == 0 { + panic("Expected a function with no arguments and one or more return values.") + } + } + + return &AsyncAssertion{ + asyncType: asyncType, + actualInput: actualInput, + fail: fail, + timeoutInterval: timeoutInterval, + pollingInterval: pollingInterval, + offset: offset, + } +} + +func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + return assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string { + switch len(optionalDescription) { + case 0: + return "" + default: + return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" + } +} + +func (assertion *AsyncAssertion) actualInputIsAFunction() bool { + actualType := reflect.TypeOf(assertion.actualInput) + return actualType.Kind() == reflect.Func && actualType.NumIn() == 0 && actualType.NumOut() > 0 +} + +func (assertion *AsyncAssertion) pollActual() (interface{}, error) { + if assertion.actualInputIsAFunction() { + values := reflect.ValueOf(assertion.actualInput).Call([]reflect.Value{}) + + extras := []interface{}{} + for _, value := range values[1:] { + extras = append(extras, value.Interface()) + } + + success, message := vetExtras(extras) + + if !success { + return nil, errors.New(message) + } + + return values[0].Interface(), nil + } + + return assertion.actualInput, nil +} + +func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool { + if assertion.actualInputIsAFunction() { + return true + } + + return oraclematcher.MatchMayChangeInTheFuture(matcher, value) +} + +func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { + timer := time.Now() + timeout := time.After(assertion.timeoutInterval) + + description := assertion.buildDescription(optionalDescription...) + + var matches bool + var err error + mayChange := true + value, err := assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + + fail := func(preamble string) { + errMsg := "" + message := "" + if err != nil { + errMsg = "Error: " + err.Error() + } else { + if desiredMatch { + message = matcher.FailureMessage(value) + } else { + message = matcher.NegatedFailureMessage(value) + } + } + assertion.fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset) + } + + if assertion.asyncType == AsyncAssertionTypeEventually { + for { + if err == nil && matches == desiredMatch { + return true + } + + if !mayChange { + fail("No future change is possible. 
Bailing out early") + return false + } + + select { + case <-time.After(assertion.pollingInterval): + value, err = assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + case <-timeout: + fail("Timed out") + return false + } + } + } else if assertion.asyncType == AsyncAssertionTypeConsistently { + for { + if !(err == nil && matches == desiredMatch) { + fail("Failed") + return false + } + + if !mayChange { + return true + } + + select { + case <-time.After(assertion.pollingInterval): + value, err = assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + case <-timeout: + return true + } + } + } + + return false +} + +func vetExtras(extras []interface{}) (bool, string) { + for i, extra := range extras { + if extra != nil { + zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface() + if !reflect.DeepEqual(zeroValue, extra) { + message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra) + return false, message + } + } + } + return true, "" +} diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_suite_test.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_suite_test.go new file mode 100644 index 00000000000..bdb0c3d2202 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_suite_test.go @@ -0,0 +1,13 @@ +package asyncassertion_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestAsyncAssertion(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "AsyncAssertion Suite") +} diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_test.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_test.go new file mode 100644 index 00000000000..3d7e3489d1e --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion_test.go @@ -0,0 +1,345 @@ +package asyncassertion_test + +import ( + "errors" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/internal/asyncassertion" +) + +var _ = Describe("Async Assertion", func() { + var ( + failureMessage string + callerSkip int + ) + + var fakeFailHandler = func(message string, skip ...int) { + failureMessage = message + callerSkip = skip[0] + } + + BeforeEach(func() { + failureMessage = "" + callerSkip = 0 + }) + + Describe("Eventually", func() { + Context("the positive case", func() { + It("should poll the function and matcher", func() { + counter := 0 + a := New(AsyncAssertionTypeEventually, func() int { + counter++ + return counter + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.Should(BeNumerically("==", 5)) + Ω(failureMessage).Should(BeZero()) + }) + + It("should continue when the matcher errors", func() { + counter := 0 + a := New(AsyncAssertionTypeEventually, func() interface{} { + counter++ + if counter == 5 { + return "not-a-number" //this should cause the matcher to error + } + return counter + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.Should(BeNumerically("==", 5), "My description %d", 2) + + Ω(failureMessage).Should(ContainSubstring("Timed out after")) + Ω(failureMessage).Should(ContainSubstring("My description 2")) + Ω(callerSkip).Should(Equal(4)) + }) + + It("should be able to timeout", func() { + counter := 0 + a := New(AsyncAssertionTypeEventually, func() int { + counter++ + return counter + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.Should(BeNumerically(">", 100), "My description %d", 2) + + Ω(counter).Should(BeNumerically(">", 8)) + Ω(counter).Should(BeNumerically("<=", 10)) + Ω(failureMessage).Should(ContainSubstring("Timed out after")) + Ω(failureMessage).Should(MatchRegexp(`\: \d`), "Should pass the correct value to the matcher message formatter.") + Ω(failureMessage).Should(ContainSubstring("My description 2")) + Ω(callerSkip).Should(Equal(4)) + }) + }) + + Context("the negative case", func() { + It("should poll the function and matcher", func() { + counter := 0 + a := New(AsyncAssertionTypeEventually, func() int { + counter += 1 + return counter + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.ShouldNot(BeNumerically("<", 3)) + + Ω(counter).Should(Equal(3)) + Ω(failureMessage).Should(BeZero()) + }) + + It("should timeout when the matcher errors", func() { + a := New(AsyncAssertionTypeEventually, func() interface{} { + return 0 //this should cause the matcher to error + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.ShouldNot(HaveLen(0), "My description %d", 2) + + Ω(failureMessage).Should(ContainSubstring("Timed out after")) + Ω(failureMessage).Should(ContainSubstring("Error:")) + Ω(failureMessage).Should(ContainSubstring("My description 2")) + Ω(callerSkip).Should(Equal(4)) + }) + + It("should be able to timeout", func() { + a := New(AsyncAssertionTypeEventually, func() int { + return 0 + }, fakeFailHandler, time.Duration(0.1*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.ShouldNot(Equal(0), "My description %d", 2) + + Ω(failureMessage).Should(ContainSubstring("Timed out after")) + Ω(failureMessage).Should(ContainSubstring(": 0"), "Should pass the correct value to the matcher message formatter.") + Ω(failureMessage).Should(ContainSubstring("My description 2")) + 
Ω(callerSkip).Should(Equal(4)) + }) + }) + + Context("with a function that returns multiple values", func() { + It("should eventually succeed if the additional arguments are nil", func() { + i := 0 + Eventually(func() (int, error) { + i++ + return i, nil + }).Should(Equal(10)) + }) + + It("should eventually timeout if the additional arguments are not nil", func() { + i := 0 + a := New(AsyncAssertionTypeEventually, func() (int, error) { + i++ + return i, errors.New("bam") + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + a.Should(Equal(2)) + + Ω(failureMessage).Should(ContainSubstring("Timed out after")) + Ω(failureMessage).Should(ContainSubstring("Error:")) + Ω(failureMessage).Should(ContainSubstring("bam")) + Ω(callerSkip).Should(Equal(4)) + }) + }) + + Context("Making an assertion without a registered fail handler", func() { + It("should panic", func() { + defer func() { + e := recover() + RegisterFailHandler(Fail) + if e == nil { + Fail("expected a panic to have occurred") + } + }() + + RegisterFailHandler(nil) + c := make(chan bool, 1) + c <- true + Eventually(c).Should(Receive()) + }) + }) + }) + + Describe("Consistently", func() { + Describe("The positive case", func() { + Context("when the matcher consistently passes for the duration", func() { + It("should pass", func() { + calls := 0 + a := New(AsyncAssertionTypeConsistently, func() string { + calls++ + return "foo" + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.Should(Equal("foo")) + Ω(calls).Should(BeNumerically(">", 8)) + Ω(calls).Should(BeNumerically("<=", 10)) + Ω(failureMessage).Should(BeZero()) + }) + }) + + Context("when the matcher fails at some point", func() { + It("should fail", func() { + calls := 0 + a := New(AsyncAssertionTypeConsistently, func() interface{} { + calls++ + if calls > 5 { + return "bar" + } + return "foo" + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.Should(Equal("foo")) + Ω(failureMessage).Should(ContainSubstring("to equal")) + Ω(callerSkip).Should(Equal(4)) + }) + }) + + Context("when the matcher errors at some point", func() { + It("should fail", func() { + calls := 0 + a := New(AsyncAssertionTypeConsistently, func() interface{} { + calls++ + if calls > 5 { + return 3 + } + return []int{1, 2, 3} + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.Should(HaveLen(3)) + Ω(failureMessage).Should(ContainSubstring("HaveLen matcher expects")) + Ω(callerSkip).Should(Equal(4)) + }) + }) + }) + + Describe("The negative case", func() { + Context("when the matcher consistently passes for the duration", func() { + It("should pass", func() { + c := make(chan bool) + a := New(AsyncAssertionTypeConsistently, c, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.ShouldNot(Receive()) + Ω(failureMessage).Should(BeZero()) + }) + }) + + Context("when the matcher fails at some point", func() { + It("should fail", func() { + c := make(chan bool) + go func() { + time.Sleep(time.Duration(100 * time.Millisecond)) + c <- true + }() + + a := New(AsyncAssertionTypeConsistently, c, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.ShouldNot(Receive()) + Ω(failureMessage).Should(ContainSubstring("not to receive anything")) + }) + }) + + Context("when the matcher 
errors at some point", func() { + It("should fail", func() { + calls := 0 + a := New(AsyncAssertionTypeConsistently, func() interface{} { + calls++ + return calls + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + + a.ShouldNot(BeNumerically(">", 5)) + Ω(failureMessage).Should(ContainSubstring("not to be >")) + Ω(callerSkip).Should(Equal(4)) + }) + }) + }) + + Context("with a function that returns multiple values", func() { + It("should consistently succeed if the additional arguments are nil", func() { + i := 2 + Consistently(func() (int, error) { + i++ + return i, nil + }).Should(BeNumerically(">=", 2)) + }) + + It("should eventually timeout if the additional arguments are not nil", func() { + i := 2 + a := New(AsyncAssertionTypeEventually, func() (int, error) { + i++ + return i, errors.New("bam") + }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) + a.Should(BeNumerically(">=", 2)) + + Ω(failureMessage).Should(ContainSubstring("Error:")) + Ω(failureMessage).Should(ContainSubstring("bam")) + Ω(callerSkip).Should(Equal(4)) + }) + }) + + Context("Making an assertion without a registered fail handler", func() { + It("should panic", func() { + defer func() { + e := recover() + RegisterFailHandler(Fail) + if e == nil { + Fail("expected a panic to have occurred") + } + }() + + RegisterFailHandler(nil) + c := make(chan bool) + Consistently(c).ShouldNot(Receive()) + }) + }) + }) + + Context("when passed a function with the wrong # or arguments & returns", func() { + It("should panic", func() { + Ω(func() { + New(AsyncAssertionTypeEventually, func() {}, fakeFailHandler, 0, 0, 1) + }).Should(Panic()) + + Ω(func() { + New(AsyncAssertionTypeEventually, func(a string) int { return 0 }, fakeFailHandler, 0, 0, 1) + }).Should(Panic()) + + Ω(func() { + New(AsyncAssertionTypeEventually, func() int { return 0 }, fakeFailHandler, 0, 0, 1) + }).ShouldNot(Panic()) + + Ω(func() { + New(AsyncAssertionTypeEventually, func() (int, error) { return 0, nil }, fakeFailHandler, 0, 0, 1) + }).ShouldNot(Panic()) + }) + }) + + Describe("bailing early", func() { + Context("when actual is a value", func() { + It("Eventually should bail out and fail early if the matcher says to", func() { + c := make(chan bool) + close(c) + + t := time.Now() + failures := InterceptGomegaFailures(func() { + Eventually(c, 0.1).Should(Receive()) + }) + Ω(time.Since(t)).Should(BeNumerically("<", 90*time.Millisecond)) + + Ω(failures).Should(HaveLen(1)) + }) + }) + + Context("when actual is a function", func() { + It("should never bail early", func() { + c := make(chan bool) + close(c) + + t := time.Now() + failures := InterceptGomegaFailures(func() { + Eventually(func() chan bool { + return c + }, 0.1).Should(Receive()) + }) + Ω(time.Since(t)).Should(BeNumerically(">=", 90*time.Millisecond)) + + Ω(failures).Should(HaveLen(1)) + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go b/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go new file mode 100644 index 00000000000..66cad88a1fb --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go @@ -0,0 +1,25 @@ +package oraclematcher + +import "github.com/onsi/gomega/types" + +/* +GomegaMatchers that also match the OracleMatcher interface can convey information about +whether or not their result will change upon future attempts. 
+ +This allows `Eventually` and `Consistently` to short circuit if success becomes impossible. + +For example, a process' exit code can never change. So, gexec's Exit matcher returns `true` +for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore. +*/ +type OracleMatcher interface { + MatchMayChangeInTheFuture(actual interface{}) bool +} + +func MatchMayChangeInTheFuture(matcher types.GomegaMatcher, value interface{}) bool { + oracleMatcher, ok := matcher.(OracleMatcher) + if !ok { + return true + } + + return oracleMatcher.MatchMayChangeInTheFuture(value) +} diff --git a/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go new file mode 100644 index 00000000000..7871fd43953 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go @@ -0,0 +1,40 @@ +package testingtsupport + +import ( + "regexp" + "runtime/debug" + "strings" + + "github.com/onsi/gomega/types" +) + +type gomegaTestingT interface { + Errorf(format string, args ...interface{}) +} + +func BuildTestingTGomegaFailHandler(t gomegaTestingT) types.GomegaFailHandler { + return func(message string, callerSkip ...int) { + skip := 1 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + stackTrace := pruneStack(string(debug.Stack()), skip) + t.Errorf("\n%s\n%s", stackTrace, message) + } +} + +func pruneStack(fullStackTrace string, skip int) string { + stack := strings.Split(fullStackTrace, "\n") + if len(stack) > 2*(skip+1) { + stack = stack[2*(skip+1):] + } + prunedStack := []string{} + re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) + for i := 0; i < len(stack)/2; i++ { + if !re.Match([]byte(stack[i*2])) { + prunedStack = append(prunedStack, stack[i*2]) + prunedStack = append(prunedStack, stack[i*2+1]) + } + } + return strings.Join(prunedStack, "\n") +} diff --git a/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go new file mode 100644 index 00000000000..b9fbd6c6404 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go @@ -0,0 +1,12 @@ +package testingtsupport_test + +import ( + . "github.com/onsi/gomega" + + "testing" +) + +func TestTestingT(t *testing.T) { + RegisterTestingT(t) + Ω(true).Should(BeTrue()) +} diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go new file mode 100644 index 00000000000..0c30aa1c2c4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -0,0 +1,418 @@ +package gomega + +import ( + "time" + + "github.com/onsi/gomega/matchers" + "github.com/onsi/gomega/types" +) + +//Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about +//types when performing comparisons. +//It is an error for both actual and expected to be nil. Use BeNil() instead. +func Equal(expected interface{}) types.GomegaMatcher { + return &matchers.EqualMatcher{ + Expected: expected, + } +} + +//BeEquivalentTo is more lax than Equal, allowing equality between different types. +//This is done by converting actual to have the type of expected before +//attempting equality with reflect.DeepEqual. +//It is an error for actual and expected to be nil. Use BeNil() instead. 
+func BeEquivalentTo(expected interface{}) types.GomegaMatcher { + return &matchers.BeEquivalentToMatcher{ + Expected: expected, + } +} + +//BeIdenticalTo uses the == operator to compare actual with expected. +//BeIdenticalTo is strict about types when performing comparisons. +//It is an error for both actual and expected to be nil. Use BeNil() instead. +func BeIdenticalTo(expected interface{}) types.GomegaMatcher { + return &matchers.BeIdenticalToMatcher{ + Expected: expected, + } +} + +//BeNil succeeds if actual is nil +func BeNil() types.GomegaMatcher { + return &matchers.BeNilMatcher{} +} + +//BeTrue succeeds if actual is true +func BeTrue() types.GomegaMatcher { + return &matchers.BeTrueMatcher{} +} + +//BeFalse succeeds if actual is false +func BeFalse() types.GomegaMatcher { + return &matchers.BeFalseMatcher{} +} + +//HaveOccurred succeeds if actual is a non-nil error +//The typical Go error checking pattern looks like: +// err := SomethingThatMightFail() +// Ω(err).ShouldNot(HaveOccurred()) +func HaveOccurred() types.GomegaMatcher { + return &matchers.HaveOccurredMatcher{} +} + +//Succeed passes if actual is a nil error +//Succeed is intended to be used with functions that return a single error value. Instead of +// err := SomethingThatMightFail() +// Ω(err).ShouldNot(HaveOccurred()) +// +//You can write: +// Ω(SomethingThatMightFail()).Should(Succeed()) +// +//It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect +//functions automatically trigger failure if any return values after the first return value are non-zero/non-nil. +//This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass. +func Succeed() types.GomegaMatcher { + return &matchers.SucceedMatcher{} +} + +//MatchError succeeds if actual is a non-nil error that matches the passed in string/error. +// +//These are valid use-cases: +// Ω(err).Should(MatchError("an error")) //asserts that err.Error() == "an error" +// Ω(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual) +// +//It is an error for err to be nil or an object that does not implement the Error interface +func MatchError(expected interface{}) types.GomegaMatcher { + return &matchers.MatchErrorMatcher{ + Expected: expected, + } +} + +//BeClosed succeeds if actual is a closed channel. +//It is an error to pass a non-channel to BeClosed, it is also an error to pass nil +// +//In order to check whether or not the channel is closed, Gomega must try to read from the channel +//(even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about +//values coming down the channel. +// +//Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before +//asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read). +// +//Finally, as a corollary: it is an error to check whether or not a send-only channel is closed. +func BeClosed() types.GomegaMatcher { + return &matchers.BeClosedMatcher{} +} + +//Receive succeeds if there is a value to be received on actual. +//Actual must be a channel (and cannot be a send-only channel) -- anything else is an error. +// +//Receive returns immediately and never blocks: +// +//- If there is nothing on the channel `c` then Ω(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. 
+// +//- If the channel `c` is closed then Ω(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. +// +//- If there is something on the channel `c` ready to be read, then Ω(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail. +// +//If you have a go-routine running in the background that will write to channel `c` you can: +// Eventually(c).Should(Receive()) +// +//This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`) +// +//A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`: +// Consistently(c).ShouldNot(Receive()) +// +//You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example: +// Ω(c).Should(Receive(Equal("foo"))) +// +//When given a matcher, `Receive` will always fail if there is nothing to be received on the channel. +// +//Passing Receive a matcher is especially useful when paired with Eventually: +// +// Eventually(c).Should(Receive(ContainSubstring("bar"))) +// +//will repeatedly attempt to pull values out of `c` until a value matching "bar" is received. +// +//Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: +// var myThing thing +// Eventually(thingChan).Should(Receive(&myThing)) +// Ω(myThing.Sprocket).Should(Equal("foo")) +// Ω(myThing.IsValid()).Should(BeTrue()) +func Receive(args ...interface{}) types.GomegaMatcher { + var arg interface{} + if len(args) > 0 { + arg = args[0] + } + + return &matchers.ReceiveMatcher{ + Arg: arg, + } +} + +//BeSent succeeds if a value can be sent to actual. +//Actual must be a channel (and cannot be a receive-only channel) that can sent the type of the value passed into BeSent -- anything else is an error. +//In addition, actual must not be closed. +// +//BeSent never blocks: +// +//- If the channel `c` is not ready to receive then Ω(c).Should(BeSent("foo")) will fail immediately +//- If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed.. presuming the channel becomes ready to receive before Eventually's timeout +//- If the channel `c` is closed then Ω(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately +// +//Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with). +//Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends. +func BeSent(arg interface{}) types.GomegaMatcher { + return &matchers.BeSentMatcher{ + Arg: arg, + } +} + +//MatchRegexp succeeds if actual is a string or stringer that matches the +//passed-in regexp. Optional arguments can be provided to construct a regexp +//via fmt.Sprintf(). +func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher { + return &matchers.MatchRegexpMatcher{ + Regexp: regexp, + Args: args, + } +} + +//ContainSubstring succeeds if actual is a string or stringer that contains the +//passed-in regexp. Optional arguments can be provided to construct the substring +//via fmt.Sprintf(). 
+func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher { + return &matchers.ContainSubstringMatcher{ + Substr: substr, + Args: args, + } +} + +//HavePrefix succeeds if actual is a string or stringer that contains the +//passed-in string as a prefix. Optional arguments can be provided to construct +//via fmt.Sprintf(). +func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher { + return &matchers.HavePrefixMatcher{ + Prefix: prefix, + Args: args, + } +} + +//HaveSuffix succeeds if actual is a string or stringer that contains the +//passed-in string as a suffix. Optional arguments can be provided to construct +//via fmt.Sprintf(). +func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher { + return &matchers.HaveSuffixMatcher{ + Suffix: suffix, + Args: args, + } +} + +//MatchJSON succeeds if actual is a string or stringer of JSON that matches +//the expected JSON. The JSONs are decoded and the resulting objects are compared via +//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter. +func MatchJSON(json interface{}) types.GomegaMatcher { + return &matchers.MatchJSONMatcher{ + JSONToMatch: json, + } +} + +//MatchYAML succeeds if actual is a string or stringer of YAML that matches +//the expected YAML. The YAML's are decoded and the resulting objects are compared via +//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter. +func MatchYAML(yaml interface{}) types.GomegaMatcher { + return &matchers.MatchYAMLMatcher{ + YAMLToMatch: yaml, + } +} + +//BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice. +func BeEmpty() types.GomegaMatcher { + return &matchers.BeEmptyMatcher{} +} + +//HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice. +func HaveLen(count int) types.GomegaMatcher { + return &matchers.HaveLenMatcher{ + Count: count, + } +} + +//HaveCap succeeds if actual has the passed-in capacity. Actual must be of type array, chan, or slice. +func HaveCap(count int) types.GomegaMatcher { + return &matchers.HaveCapMatcher{ + Count: count, + } +} + +//BeZero succeeds if actual is the zero value for its type or if actual is nil. +func BeZero() types.GomegaMatcher { + return &matchers.BeZeroMatcher{} +} + +//ContainElement succeeds if actual contains the passed in element. +//By default ContainElement() uses Equal() to perform the match, however a +//matcher can be passed in instead: +// Ω([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar"))) +// +//Actual must be an array, slice or map. +//For maps, ContainElement searches through the map's values. +func ContainElement(element interface{}) types.GomegaMatcher { + return &matchers.ContainElementMatcher{ + Element: element, + } +} + +//ConsistOf succeeds if actual contains preciely the elements passed into the matcher. The ordering of the elements does not matter. +//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples: +// +// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo")) +// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo")) +// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo"))) +// +//Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values. 
+// +//You typically pass variadic arguments to ConsistOf (as in the examples above). However, if you need to pass in a slice you can provided that it +//is the only element passed in to ConsistOf: +// +// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"})) +// +//Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule. +func ConsistOf(elements ...interface{}) types.GomegaMatcher { + return &matchers.ConsistOfMatcher{ + Elements: elements, + } +} + +//HaveKey succeeds if actual is a map with the passed in key. +//By default HaveKey uses Equal() to perform the match, however a +//matcher can be passed in instead: +// Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`))) +func HaveKey(key interface{}) types.GomegaMatcher { + return &matchers.HaveKeyMatcher{ + Key: key, + } +} + +//HaveKeyWithValue succeeds if actual is a map with the passed in key and value. +//By default HaveKeyWithValue uses Equal() to perform the match, however a +//matcher can be passed in instead: +// Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar")) +// Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar")) +func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher { + return &matchers.HaveKeyWithValueMatcher{ + Key: key, + Value: value, + } +} + +//BeNumerically performs numerical assertions in a type-agnostic way. +//Actual and expected should be numbers, though the specific type of +//number is irrelevant (floa32, float64, uint8, etc...). +// +//There are six, self-explanatory, supported comparators: +// Ω(1.0).Should(BeNumerically("==", 1)) +// Ω(1.0).Should(BeNumerically("~", 0.999, 0.01)) +// Ω(1.0).Should(BeNumerically(">", 0.9)) +// Ω(1.0).Should(BeNumerically(">=", 1.0)) +// Ω(1.0).Should(BeNumerically("<", 3)) +// Ω(1.0).Should(BeNumerically("<=", 1.0)) +func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher { + return &matchers.BeNumericallyMatcher{ + Comparator: comparator, + CompareTo: compareTo, + } +} + +//BeTemporally compares time.Time's like BeNumerically +//Actual and expected must be time.Time. The comparators are the same as for BeNumerically +// Ω(time.Now()).Should(BeTemporally(">", time.Time{})) +// Ω(time.Now()).Should(BeTemporally("~", time.Now(), time.Second)) +func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Duration) types.GomegaMatcher { + return &matchers.BeTemporallyMatcher{ + Comparator: comparator, + CompareTo: compareTo, + Threshold: threshold, + } +} + +//BeAssignableToTypeOf succeeds if actual is assignable to the type of expected. +//It will return an error when one of the values is nil. +// Ω(0).Should(BeAssignableToTypeOf(0)) // Same values +// Ω(5).Should(BeAssignableToTypeOf(-1)) // different values same type +// Ω("foo").Should(BeAssignableToTypeOf("bar")) // different values same type +// Ω(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{})) +func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher { + return &matchers.AssignableToTypeOfMatcher{ + Expected: expected, + } +} + +//Panic succeeds if actual is a function that, when invoked, panics. +//Actual must be a function that takes no arguments and returns no results. 
+func Panic() types.GomegaMatcher { + return &matchers.PanicMatcher{} +} + +//BeAnExistingFile succeeds if a file exists. +//Actual must be a string representing the abs path to the file being checked. +func BeAnExistingFile() types.GomegaMatcher { + return &matchers.BeAnExistingFileMatcher{} +} + +//BeARegularFile succeeds iff a file exists and is a regular file. +//Actual must be a string representing the abs path to the file being checked. +func BeARegularFile() types.GomegaMatcher { + return &matchers.BeARegularFileMatcher{} +} + +//BeADirectory succeeds iff a file exists and is a directory. +//Actual must be a string representing the abs path to the file being checked. +func BeADirectory() types.GomegaMatcher { + return &matchers.BeADirectoryMatcher{} +} + +//And succeeds only if all of the given matchers succeed. +//The matchers are tried in order, and will fail-fast if one doesn't succeed. +// Expect("hi").To(And(HaveLen(2), Equal("hi")) +// +//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +func And(ms ...types.GomegaMatcher) types.GomegaMatcher { + return &matchers.AndMatcher{Matchers: ms} +} + +//SatisfyAll is an alias for And(). +// Ω("hi").Should(SatisfyAll(HaveLen(2), Equal("hi"))) +func SatisfyAll(matchers ...types.GomegaMatcher) types.GomegaMatcher { + return And(matchers...) +} + +//Or succeeds if any of the given matchers succeed. +//The matchers are tried in order and will return immediately upon the first successful match. +// Expect("hi").To(Or(HaveLen(3), HaveLen(2)) +// +//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +func Or(ms ...types.GomegaMatcher) types.GomegaMatcher { + return &matchers.OrMatcher{Matchers: ms} +} + +//SatisfyAny is an alias for Or(). +// Expect("hi").SatisfyAny(Or(HaveLen(3), HaveLen(2)) +func SatisfyAny(matchers ...types.GomegaMatcher) types.GomegaMatcher { + return Or(matchers...) +} + +//Not negates the given matcher; it succeeds if the given matcher fails. +// Expect(1).To(Not(Equal(2)) +// +//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +func Not(matcher types.GomegaMatcher) types.GomegaMatcher { + return &matchers.NotMatcher{Matcher: matcher} +} + +//WithTransform applies the `transform` to the actual value and matches it against `matcher`. +//The given transform must be a function of one parameter that returns one value. +// var plus1 = func(i int) int { return i + 1 } +// Expect(1).To(WithTransform(plus1, Equal(2)) +// +//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. 
+func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher { + return matchers.NewWithTransformMatcher(transform, matcher) +} diff --git a/vendor/github.com/onsi/gomega/matchers/and.go b/vendor/github.com/onsi/gomega/matchers/and.go new file mode 100644 index 00000000000..94c42a7db71 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/and.go @@ -0,0 +1,64 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type AndMatcher struct { + Matchers []types.GomegaMatcher + + // state + firstFailedMatcher types.GomegaMatcher +} + +func (m *AndMatcher) Match(actual interface{}) (success bool, err error) { + m.firstFailedMatcher = nil + for _, matcher := range m.Matchers { + success, err := matcher.Match(actual) + if !success || err != nil { + m.firstFailedMatcher = matcher + return false, err + } + } + return true, nil +} + +func (m *AndMatcher) FailureMessage(actual interface{}) (message string) { + return m.firstFailedMatcher.FailureMessage(actual) +} + +func (m *AndMatcher) NegatedFailureMessage(actual interface{}) (message string) { + // not the most beautiful list of matchers, but not bad either... + return format.Message(actual, fmt.Sprintf("To not satisfy all of these matchers: %s", m.Matchers)) +} + +func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + /* + Example with 3 matchers: A, B, C + + Match evaluates them: T, F, => F + So match is currently F, what should MatchMayChangeInTheFuture() return? + Seems like it only depends on B, since currently B MUST change to allow the result to become T + + Match eval: T, T, T => T + So match is currently T, what should MatchMayChangeInTheFuture() return? + Seems to depend on ANY of them being able to change to F. + */ + + if m.firstFailedMatcher == nil { + // so all matchers succeeded.. Any one of them changing would change the result. + for _, matcher := range m.Matchers { + if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) { + return true + } + } + return false // none of were going to change + } else { + // one of the matchers failed.. it must be able to change in order to affect the result + return oraclematcher.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual) + } +} diff --git a/vendor/github.com/onsi/gomega/matchers/and_test.go b/vendor/github.com/onsi/gomega/matchers/and_test.go new file mode 100644 index 00000000000..acf778cd6d3 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/and_test.go @@ -0,0 +1,103 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" + "github.com/onsi/gomega/types" +) + +// sample data +var ( + // example input + input = "hi" + // some matchers that succeed against the input + true1 = HaveLen(2) + true2 = Equal("hi") + true3 = MatchRegexp("hi") + // some matchers that fail against the input. + false1 = HaveLen(1) + false2 = Equal("hip") + false3 = MatchRegexp("hope") +) + +// verifyFailureMessage expects the matcher to fail with the given input, and verifies the failure message. 
+func verifyFailureMessage(m types.GomegaMatcher, input string, expectedFailureMsgFragment string) { + Expect(m.Match(input)).To(BeFalse()) + Expect(m.FailureMessage(input)).To(Equal( + "Expected\n : " + input + "\n" + expectedFailureMsgFragment)) +} + +var _ = Describe("AndMatcher", func() { + It("works with positive cases", func() { + Expect(input).To(And()) + Expect(input).To(And(true1)) + Expect(input).To(And(true1, true2)) + Expect(input).To(And(true1, true2, true3)) + + // use alias + Expect(input).To(SatisfyAll(true1, true2, true3)) + }) + + It("works with negative cases", func() { + Expect(input).ToNot(And(false1, false2)) + Expect(input).ToNot(And(true1, true2, false3)) + Expect(input).ToNot(And(true1, false2, false3)) + Expect(input).ToNot(And(false1, true1, true2)) + }) + + Context("failure messages", func() { + Context("when match fails", func() { + It("gives a descriptive message", func() { + verifyFailureMessage(And(false1, true1), input, "to have length 1") + verifyFailureMessage(And(true1, false2), input, "to equal\n : hip") + verifyFailureMessage(And(true1, true2, false3), input, "to match regular expression\n : hope") + }) + }) + + Context("when match succeeds, but expected it to fail", func() { + It("gives a descriptive message", func() { + verifyFailureMessage(Not(And(true1, true2)), input, + `To not satisfy all of these matchers: [%!s(*matchers.HaveLenMatcher=&{2}) %!s(*matchers.EqualMatcher=&{hi})]`) + }) + }) + }) + + Context("MatchMayChangeInTheFuture", func() { + Context("Match returned false", func() { + Context("returns value of the failed matcher", func() { + It("false if failed matcher not going to change", func() { + // 3 matchers: 1st returns true, 2nd returns false and is not going to change, 3rd is never called + m := And(Not(BeNil()), Or(), Equal(1)) + Expect(m.Match("hi")).To(BeFalse()) + Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("hi")).To(BeFalse()) // empty Or() indicates not going to change + }) + It("true if failed matcher indicates it might change", func() { + // 3 matchers: 1st returns true, 2nd returns false and "might" change, 3rd is never called + m := And(Not(BeNil()), Equal(5), Equal(1)) + Expect(m.Match("hi")).To(BeFalse()) + Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("hi")).To(BeTrue()) // Equal(5) indicates it might change + }) + }) + }) + Context("Match returned true", func() { + It("returns true if any of the matchers could change", func() { + // 3 matchers, all return true, and all could change + m := And(Not(BeNil()), Equal("hi"), HaveLen(2)) + Expect(m.Match("hi")).To(BeTrue()) + Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("hi")).To(BeTrue()) // all 3 of these matchers default to 'true' + }) + It("returns false if none of the matchers could change", func() { + // empty And() has the property of always matching, and never can change since there are no sub-matchers that could change + m := And() + Expect(m.Match("anything")).To(BeTrue()) + Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("anything")).To(BeFalse()) + + // And() with 3 sub-matchers that return true, and can't change + m = And(And(), And(), And()) + Expect(m.Match("hi")).To(BeTrue()) + Expect(m.(*AndMatcher).MatchMayChangeInTheFuture("hi")).To(BeFalse()) // the 3 empty And()'s won't change + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go new file mode 100644 index 00000000000..89a1fc2116b --- /dev/null +++ 
b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go @@ -0,0 +1,31 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type AssignableToTypeOfMatcher struct { + Expected interface{} +} + +func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil || matcher.Expected == nil { + return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") + } + + actualType := reflect.TypeOf(actual) + expectedType := reflect.TypeOf(matcher.Expected) + + return actualType.AssignableTo(expectedType), nil +} + +func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual interface{}) string { + return format.Message(actual, fmt.Sprintf("to be assignable to the type: %T", matcher.Expected)) +} + +func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual interface{}) string { + return format.Message(actual, fmt.Sprintf("not to be assignable to the type: %T", matcher.Expected)) +} diff --git a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher_test.go new file mode 100644 index 00000000000..d2280e0506d --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher_test.go @@ -0,0 +1,30 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("AssignableToTypeOf", func() { + Context("When asserting assignability between types", func() { + It("should do the right thing", func() { + Ω(0).Should(BeAssignableToTypeOf(0)) + Ω(5).Should(BeAssignableToTypeOf(-1)) + Ω("foo").Should(BeAssignableToTypeOf("bar")) + Ω(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{})) + + Ω(0).ShouldNot(BeAssignableToTypeOf("bar")) + Ω(5).ShouldNot(BeAssignableToTypeOf(struct{ Foo string }{})) + Ω("foo").ShouldNot(BeAssignableToTypeOf(42)) + }) + }) + + Context("When asserting nil values", func() { + It("should error", func() { + success, err := (&AssignableToTypeOfMatcher{Expected: nil}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go new file mode 100644 index 00000000000..7b6975e41e6 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go @@ -0,0 +1,54 @@ +package matchers + +import ( + "fmt" + "os" + + "github.com/onsi/gomega/format" +) + +type notADirectoryError struct { + os.FileInfo +} + +func (t notADirectoryError) Error() string { + fileInfo := os.FileInfo(t) + switch { + case fileInfo.Mode().IsRegular(): + return "file is a regular file" + default: + return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String()) + } +} + +type BeADirectoryMatcher struct { + expected interface{} + err error +} + +func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err error) { + actualFilename, ok := actual.(string) + if !ok { + return false, fmt.Errorf("BeADirectoryMatcher matcher expects a file path") + } + + fileInfo, err := os.Stat(actualFilename) + if err != nil { + matcher.err = err + return false, nil + } + + if !fileInfo.Mode().IsDir() { + matcher.err = notADirectoryError{fileInfo} + return false, nil + } + return true, nil +} + +func 
(matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be a directory: %s", matcher.err)) +} + +func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not be a directory")) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_directory_test.go b/vendor/github.com/onsi/gomega/matchers/be_a_directory_test.go new file mode 100644 index 00000000000..e59d7699018 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_a_directory_test.go @@ -0,0 +1,40 @@ +package matchers_test + +import ( + "io/ioutil" + "os" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeADirectoryMatcher", func() { + Context("when passed a string", func() { + It("should do the right thing", func() { + Ω("/dne/test").ShouldNot(BeADirectory()) + + tmpFile, err := ioutil.TempFile("", "gomega-test-tempfile") + Ω(err).ShouldNot(HaveOccurred()) + defer os.Remove(tmpFile.Name()) + Ω(tmpFile.Name()).ShouldNot(BeADirectory()) + + tmpDir, err := ioutil.TempDir("", "gomega-test-tempdir") + Ω(err).ShouldNot(HaveOccurred()) + defer os.Remove(tmpDir) + Ω(tmpDir).Should(BeADirectory()) + }) + }) + + Context("when passed something else", func() { + It("should error", func() { + success, err := (&BeADirectoryMatcher{}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeADirectoryMatcher{}).Match(true) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go new file mode 100644 index 00000000000..e239131fb61 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go @@ -0,0 +1,54 @@ +package matchers + +import ( + "fmt" + "os" + + "github.com/onsi/gomega/format" +) + +type notARegularFileError struct { + os.FileInfo +} + +func (t notARegularFileError) Error() string { + fileInfo := os.FileInfo(t) + switch { + case fileInfo.IsDir(): + return "file is a directory" + default: + return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String()) + } +} + +type BeARegularFileMatcher struct { + expected interface{} + err error +} + +func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, err error) { + actualFilename, ok := actual.(string) + if !ok { + return false, fmt.Errorf("BeARegularFileMatcher matcher expects a file path") + } + + fileInfo, err := os.Stat(actualFilename) + if err != nil { + matcher.err = err + return false, nil + } + + if !fileInfo.Mode().IsRegular() { + matcher.err = notARegularFileError{fileInfo} + return false, nil + } + return true, nil +} + +func (matcher *BeARegularFileMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be a regular file: %s", matcher.err)) +} + +func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not be a regular file")) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file_test.go b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file_test.go new file mode 100644 index 00000000000..951e750d642 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file_test.go @@ -0,0 +1,40 @@ +package matchers_test + +import ( + "io/ioutil" + 
"os" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeARegularFileMatcher", func() { + Context("when passed a string", func() { + It("should do the right thing", func() { + Ω("/dne/test").ShouldNot(BeARegularFile()) + + tmpFile, err := ioutil.TempFile("", "gomega-test-tempfile") + Ω(err).ShouldNot(HaveOccurred()) + defer os.Remove(tmpFile.Name()) + Ω(tmpFile.Name()).Should(BeARegularFile()) + + tmpDir, err := ioutil.TempDir("", "gomega-test-tempdir") + Ω(err).ShouldNot(HaveOccurred()) + defer os.Remove(tmpDir) + Ω(tmpDir).ShouldNot(BeARegularFile()) + }) + }) + + Context("when passed something else", func() { + It("should error", func() { + success, err := (&BeARegularFileMatcher{}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeARegularFileMatcher{}).Match(true) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go new file mode 100644 index 00000000000..d42eba22344 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go @@ -0,0 +1,38 @@ +package matchers + +import ( + "fmt" + "os" + + "github.com/onsi/gomega/format" +) + +type BeAnExistingFileMatcher struct { + expected interface{} +} + +func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool, err error) { + actualFilename, ok := actual.(string) + if !ok { + return false, fmt.Errorf("BeAnExistingFileMatcher matcher expects a file path") + } + + if _, err = os.Stat(actualFilename); err != nil { + switch { + case os.IsNotExist(err): + return false, nil + default: + return false, err + } + } + + return true, nil +} + +func (matcher *BeAnExistingFileMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to exist")) +} + +func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to exist")) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file_test.go b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file_test.go new file mode 100644 index 00000000000..775f7b6aca1 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file_test.go @@ -0,0 +1,40 @@ +package matchers_test + +import ( + "io/ioutil" + "os" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeAnExistingFileMatcher", func() { + Context("when passed a string", func() { + It("should do the right thing", func() { + Ω("/dne/test").ShouldNot(BeAnExistingFile()) + + tmpFile, err := ioutil.TempFile("", "gomega-test-tempfile") + Ω(err).ShouldNot(HaveOccurred()) + defer os.Remove(tmpFile.Name()) + Ω(tmpFile.Name()).Should(BeAnExistingFile()) + + tmpDir, err := ioutil.TempDir("", "gomega-test-tempdir") + Ω(err).ShouldNot(HaveOccurred()) + defer os.Remove(tmpDir) + Ω(tmpDir).Should(BeAnExistingFile()) + }) + }) + + Context("when passed something else", func() { + It("should error", func() { + success, err := (&BeAnExistingFileMatcher{}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeAnExistingFileMatcher{}).Match(true) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go new file mode 100644 index 00000000000..c1b499597d3 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go @@ -0,0 +1,45 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "reflect" +) + +type BeClosedMatcher struct { +} + +func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err error) { + if !isChan(actual) { + return false, fmt.Errorf("BeClosed matcher expects a channel. Got:\n%s", format.Object(actual, 1)) + } + + channelType := reflect.TypeOf(actual) + channelValue := reflect.ValueOf(actual) + + if channelType.ChanDir() == reflect.SendDir { + return false, fmt.Errorf("BeClosed matcher cannot determine if a send-only channel is closed or open. Got:\n%s", format.Object(actual, 1)) + } + + winnerIndex, _, open := reflect.Select([]reflect.SelectCase{ + reflect.SelectCase{Dir: reflect.SelectRecv, Chan: channelValue}, + reflect.SelectCase{Dir: reflect.SelectDefault}, + }) + + var closed bool + if winnerIndex == 0 { + closed = !open + } else if winnerIndex == 1 { + closed = false + } + + return closed, nil +} + +func (matcher *BeClosedMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be closed") +} + +func (matcher *BeClosedMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be open") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher_test.go new file mode 100644 index 00000000000..b2c40c91039 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher_test.go @@ -0,0 +1,70 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeClosedMatcher", func() { + Context("when passed a channel", func() { + It("should do the right thing", func() { + openChannel := make(chan bool) + Ω(openChannel).ShouldNot(BeClosed()) + + var openReaderChannel <-chan bool + openReaderChannel = openChannel + Ω(openReaderChannel).ShouldNot(BeClosed()) + + closedChannel := make(chan bool) + close(closedChannel) + + Ω(closedChannel).Should(BeClosed()) + + var closedReaderChannel <-chan bool + closedReaderChannel = closedChannel + Ω(closedReaderChannel).Should(BeClosed()) + }) + }) + + Context("when passed a send-only channel", func() { + It("should error", func() { + openChannel := make(chan bool) + var openWriterChannel chan<- bool + openWriterChannel = openChannel + + success, err := (&BeClosedMatcher{}).Match(openWriterChannel) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + closedChannel := make(chan bool) + close(closedChannel) + + var closedWriterChannel chan<- bool + closedWriterChannel = closedChannel + + success, err = (&BeClosedMatcher{}).Match(closedWriterChannel) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + }) + }) + + Context("when passed something else", func() { + It("should error", func() { + var nilChannel chan bool + + success, err := (&BeClosedMatcher{}).Match(nilChannel) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeClosedMatcher{}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeClosedMatcher{}).Match(7) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go new file mode 100644 index 00000000000..55bdd7d15df --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go @@ -0,0 +1,26 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" +) + +type BeEmptyMatcher struct { +} + +func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) { + length, ok := lengthOf(actual) + if !ok { + return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1)) + } + + return length == 0, nil +} + +func (matcher *BeEmptyMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be empty") +} + +func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be empty") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher_test.go new file mode 100644 index 00000000000..541c1b951ec --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher_test.go @@ -0,0 +1,52 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeEmpty", func() { + Context("when passed a supported type", func() { + It("should do the right thing", func() { + Ω("").Should(BeEmpty()) + Ω(" ").ShouldNot(BeEmpty()) + + Ω([0]int{}).Should(BeEmpty()) + Ω([1]int{1}).ShouldNot(BeEmpty()) + + Ω([]int{}).Should(BeEmpty()) + Ω([]int{1}).ShouldNot(BeEmpty()) + + Ω(map[string]int{}).Should(BeEmpty()) + Ω(map[string]int{"a": 1}).ShouldNot(BeEmpty()) + + c := make(chan bool, 1) + Ω(c).Should(BeEmpty()) + c <- true + Ω(c).ShouldNot(BeEmpty()) + }) + }) + + Context("when passed a correctly typed nil", func() { + It("should be true", func() { + var nilSlice []int + Ω(nilSlice).Should(BeEmpty()) + + var nilMap map[int]string + Ω(nilMap).Should(BeEmpty()) + }) + }) + + Context("when passed an unsupported type", func() { + It("should error", func() { + success, err := (&BeEmptyMatcher{}).Match(0) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeEmptyMatcher{}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go new file mode 100644 index 00000000000..32a0c3108a4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go @@ -0,0 +1,33 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "reflect" +) + +type BeEquivalentToMatcher struct { + Expected interface{} +} + +func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil && matcher.Expected == nil { + return false, fmt.Errorf("Both actual and expected must not be nil.") + } + + convertedActual := actual + + if actual != nil && matcher.Expected != nil && reflect.TypeOf(actual).ConvertibleTo(reflect.TypeOf(matcher.Expected)) { + convertedActual = reflect.ValueOf(actual).Convert(reflect.TypeOf(matcher.Expected)).Interface() + } + + return reflect.DeepEqual(convertedActual, matcher.Expected), nil +} + +func (matcher *BeEquivalentToMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be equivalent to", matcher.Expected) +} + +func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be equivalent to", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher_test.go new file mode 100644 index 00000000000..def5104fa75 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher_test.go @@ -0,0 +1,50 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeEquivalentTo", func() { + Context("when asserting that nil is equivalent to nil", func() { + It("should error", func() { + success, err := (&BeEquivalentToMatcher{Expected: nil}).Match(nil) + + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("When asserting on nil", func() { + It("should do the right thing", func() { + Ω("foo").ShouldNot(BeEquivalentTo(nil)) + Ω(nil).ShouldNot(BeEquivalentTo(3)) + Ω([]int{1, 2}).ShouldNot(BeEquivalentTo(nil)) + }) + }) + + Context("When asserting on type aliases", func() { + It("should the right thing", func() { + Ω(StringAlias("foo")).Should(BeEquivalentTo("foo")) + Ω("foo").Should(BeEquivalentTo(StringAlias("foo"))) + Ω(StringAlias("foo")).ShouldNot(BeEquivalentTo("bar")) + Ω("foo").ShouldNot(BeEquivalentTo(StringAlias("bar"))) + }) + }) + + Context("When asserting on numbers", func() { + It("should convert actual to expected and do the right thing", func() { + Ω(5).Should(BeEquivalentTo(5)) + Ω(5.0).Should(BeEquivalentTo(5.0)) + Ω(5).Should(BeEquivalentTo(5.0)) + + Ω(5).ShouldNot(BeEquivalentTo("5")) + Ω(5).ShouldNot(BeEquivalentTo(3)) + + //Here be dragons! + Ω(5.1).Should(BeEquivalentTo(5)) + Ω(5).ShouldNot(BeEquivalentTo(5.1)) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go new file mode 100644 index 00000000000..0b224cbbc64 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go @@ -0,0 +1,25 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" +) + +type BeFalseMatcher struct { +} + +func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) { + if !isBool(actual) { + return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1)) + } + + return actual == false, nil +} + +func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be false") +} + +func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be false") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher_test.go new file mode 100644 index 00000000000..3965a2c5392 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_false_matcher_test.go @@ -0,0 +1,20 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeFalse", func() { + It("should handle true and false correctly", func() { + Ω(true).ShouldNot(BeFalse()) + Ω(false).Should(BeFalse()) + }) + + It("should only support booleans", func() { + success, err := (&BeFalseMatcher{}).Match("foo") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_identical_to.go b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go new file mode 100644 index 00000000000..fdcda4d1fb9 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go @@ -0,0 +1,37 @@ +package matchers + +import ( + "fmt" + "runtime" + + "github.com/onsi/gomega/format" +) + +type BeIdenticalToMatcher struct { + Expected interface{} +} + +func (matcher *BeIdenticalToMatcher) Match(actual interface{}) (success bool, matchErr error) { + if actual == nil && matcher.Expected == nil { + return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") + } + + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + success = false + matchErr = nil + } + } + }() + + return actual == matcher.Expected, nil +} + +func (matcher *BeIdenticalToMatcher) FailureMessage(actual interface{}) string { + return format.Message(actual, "to be identical to", matcher.Expected) +} + +func (matcher *BeIdenticalToMatcher) NegatedFailureMessage(actual interface{}) string { + return format.Message(actual, "not to be identical to", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_identical_to_test.go b/vendor/github.com/onsi/gomega/matchers/be_identical_to_test.go new file mode 100644 index 00000000000..8b90a1a619e --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_identical_to_test.go @@ -0,0 +1,61 @@ +package matchers_test + +import ( + "errors" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeIdenticalTo", func() { + Context("when asserting that nil equals nil", func() { + It("should error", func() { + success, err := (&BeIdenticalToMatcher{Expected: nil}).Match(nil) + + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + It("should treat the same pointer to a struct as identical", func() { + mySpecialStruct := myCustomType{} + Ω(&mySpecialStruct).Should(BeIdenticalTo(&mySpecialStruct)) + Ω(&myCustomType{}).ShouldNot(BeIdenticalTo(&mySpecialStruct)) + }) + + It("should be strict about types", func() { + Ω(5).ShouldNot(BeIdenticalTo("5")) + Ω(5).ShouldNot(BeIdenticalTo(5.0)) + Ω(5).ShouldNot(BeIdenticalTo(3)) + }) + + It("should treat primtives as identical", func() { + Ω("5").Should(BeIdenticalTo("5")) + Ω("5").ShouldNot(BeIdenticalTo("55")) + + Ω(5.55).Should(BeIdenticalTo(5.55)) + Ω(5.55).ShouldNot(BeIdenticalTo(6.66)) + + Ω(5).Should(BeIdenticalTo(5)) + Ω(5).ShouldNot(BeIdenticalTo(55)) + }) + + It("should treat the same pointers to a slice as identical", func() { + mySlice := []int{1, 2} + Ω(&mySlice).Should(BeIdenticalTo(&mySlice)) + Ω(&mySlice).ShouldNot(BeIdenticalTo(&[]int{1, 2})) + }) + + It("should treat the same pointers to a map as identical", func() { + myMap := map[string]string{"a": "b", "c": "d"} + Ω(&myMap).Should(BeIdenticalTo(&myMap)) + Ω(myMap).ShouldNot(BeIdenticalTo(map[string]string{"a": "b", "c": "d"})) + }) + + It("should treat the same pointers to an error as identical", func() { + myError := errors.New("foo") + Ω(&myError).Should(BeIdenticalTo(&myError)) + Ω(errors.New("foo")).ShouldNot(BeIdenticalTo(errors.New("bar"))) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go new file mode 100644 index 00000000000..7ee84fe1bcf --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go @@ -0,0 +1,18 @@ +package matchers + +import "github.com/onsi/gomega/format" + +type BeNilMatcher struct { +} + +func (matcher *BeNilMatcher) Match(actual interface{}) (success bool, err error) { + return isNil(actual), nil +} + +func (matcher *BeNilMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be nil") +} + +func (matcher *BeNilMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be nil") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher_test.go new file mode 100644 index 00000000000..75332536329 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher_test.go @@ -0,0 +1,28 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("BeNil", func() { + It("should succeed when passed nil", func() { + Ω(nil).Should(BeNil()) + }) + + It("should succeed when passed a typed nil", func() { + var a []int + Ω(a).Should(BeNil()) + }) + + It("should succeed when passing nil pointer", func() { + var f *struct{} + Ω(f).Should(BeNil()) + }) + + It("should not succeed when not passed nil", func() { + Ω(0).ShouldNot(BeNil()) + Ω(false).ShouldNot(BeNil()) + Ω("").ShouldNot(BeNil()) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go new file mode 100644 index 00000000000..52f83fe3f39 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go @@ -0,0 +1,119 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "math" +) + +type BeNumericallyMatcher struct { + Comparator string + CompareTo []interface{} +} + +func (matcher *BeNumericallyMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo[0]) +} + +func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo[0]) +} + +func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, err error) { + if len(matcher.CompareTo) == 0 || len(matcher.CompareTo) > 2 { + return false, fmt.Errorf("BeNumerically requires 1 or 2 CompareTo arguments. Got:\n%s", format.Object(matcher.CompareTo, 1)) + } + if !isNumber(actual) { + return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(actual, 1)) + } + if !isNumber(matcher.CompareTo[0]) { + return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[0], 1)) + } + if len(matcher.CompareTo) == 2 && !isNumber(matcher.CompareTo[1]) { + return false, fmt.Errorf("Expected a number. 
Got:\n%s", format.Object(matcher.CompareTo[0], 1)) + } + + switch matcher.Comparator { + case "==", "~", ">", ">=", "<", "<=": + default: + return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator) + } + + if isFloat(actual) || isFloat(matcher.CompareTo[0]) { + var secondOperand float64 = 1e-8 + if len(matcher.CompareTo) == 2 { + secondOperand = toFloat(matcher.CompareTo[1]) + } + success = matcher.matchFloats(toFloat(actual), toFloat(matcher.CompareTo[0]), secondOperand) + } else if isInteger(actual) { + var secondOperand int64 = 0 + if len(matcher.CompareTo) == 2 { + secondOperand = toInteger(matcher.CompareTo[1]) + } + success = matcher.matchIntegers(toInteger(actual), toInteger(matcher.CompareTo[0]), secondOperand) + } else if isUnsignedInteger(actual) { + var secondOperand uint64 = 0 + if len(matcher.CompareTo) == 2 { + secondOperand = toUnsignedInteger(matcher.CompareTo[1]) + } + success = matcher.matchUnsignedIntegers(toUnsignedInteger(actual), toUnsignedInteger(matcher.CompareTo[0]), secondOperand) + } else { + return false, fmt.Errorf("Failed to compare:\n%s\n%s:\n%s", format.Object(actual, 1), matcher.Comparator, format.Object(matcher.CompareTo[0], 1)) + } + + return success, nil +} + +func (matcher *BeNumericallyMatcher) matchIntegers(actual, compareTo, threshold int64) (success bool) { + switch matcher.Comparator { + case "==", "~": + diff := actual - compareTo + return -threshold <= diff && diff <= threshold + case ">": + return (actual > compareTo) + case ">=": + return (actual >= compareTo) + case "<": + return (actual < compareTo) + case "<=": + return (actual <= compareTo) + } + return false +} + +func (matcher *BeNumericallyMatcher) matchUnsignedIntegers(actual, compareTo, threshold uint64) (success bool) { + switch matcher.Comparator { + case "==", "~": + if actual < compareTo { + actual, compareTo = compareTo, actual + } + return actual-compareTo <= threshold + case ">": + return (actual > compareTo) + case ">=": + return (actual >= compareTo) + case "<": + return (actual < compareTo) + case "<=": + return (actual <= compareTo) + } + return false +} + +func (matcher *BeNumericallyMatcher) matchFloats(actual, compareTo, threshold float64) (success bool) { + switch matcher.Comparator { + case "~": + return math.Abs(actual-compareTo) <= threshold + case "==": + return (actual == compareTo) + case ">": + return (actual > compareTo) + case ">=": + return (actual >= compareTo) + case "<": + return (actual < compareTo) + case "<=": + return (actual <= compareTo) + } + return false +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher_test.go new file mode 100644 index 00000000000..43fdb1fe0b6 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher_test.go @@ -0,0 +1,148 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeNumerically", func() { + Context("when passed a number", func() { + It("should support ==", func() { + Ω(uint32(5)).Should(BeNumerically("==", 5)) + Ω(float64(5.0)).Should(BeNumerically("==", 5)) + Ω(int8(5)).Should(BeNumerically("==", 5)) + }) + + It("should not have false positives", func() { + Ω(5.1).ShouldNot(BeNumerically("==", 5)) + Ω(5).ShouldNot(BeNumerically("==", 5.1)) + }) + + It("should support >", func() { + Ω(uint32(5)).Should(BeNumerically(">", 4)) + Ω(float64(5.0)).Should(BeNumerically(">", 4.9)) + Ω(int8(5)).Should(BeNumerically(">", 4)) + + Ω(uint32(5)).ShouldNot(BeNumerically(">", 5)) + Ω(float64(5.0)).ShouldNot(BeNumerically(">", 5.0)) + Ω(int8(5)).ShouldNot(BeNumerically(">", 5)) + }) + + It("should support <", func() { + Ω(uint32(5)).Should(BeNumerically("<", 6)) + Ω(float64(5.0)).Should(BeNumerically("<", 5.1)) + Ω(int8(5)).Should(BeNumerically("<", 6)) + + Ω(uint32(5)).ShouldNot(BeNumerically("<", 5)) + Ω(float64(5.0)).ShouldNot(BeNumerically("<", 5.0)) + Ω(int8(5)).ShouldNot(BeNumerically("<", 5)) + }) + + It("should support >=", func() { + Ω(uint32(5)).Should(BeNumerically(">=", 4)) + Ω(float64(5.0)).Should(BeNumerically(">=", 4.9)) + Ω(int8(5)).Should(BeNumerically(">=", 4)) + + Ω(uint32(5)).Should(BeNumerically(">=", 5)) + Ω(float64(5.0)).Should(BeNumerically(">=", 5.0)) + Ω(int8(5)).Should(BeNumerically(">=", 5)) + + Ω(uint32(5)).ShouldNot(BeNumerically(">=", 6)) + Ω(float64(5.0)).ShouldNot(BeNumerically(">=", 5.1)) + Ω(int8(5)).ShouldNot(BeNumerically(">=", 6)) + }) + + It("should support <=", func() { + Ω(uint32(5)).Should(BeNumerically("<=", 6)) + Ω(float64(5.0)).Should(BeNumerically("<=", 5.1)) + Ω(int8(5)).Should(BeNumerically("<=", 6)) + + Ω(uint32(5)).Should(BeNumerically("<=", 5)) + Ω(float64(5.0)).Should(BeNumerically("<=", 5.0)) + Ω(int8(5)).Should(BeNumerically("<=", 5)) + + Ω(uint32(5)).ShouldNot(BeNumerically("<=", 4)) + Ω(float64(5.0)).ShouldNot(BeNumerically("<=", 4.9)) + Ω(int8(5)).Should(BeNumerically("<=", 5)) + }) + + Context("when passed ~", func() { + Context("when passed a float", func() { + Context("and there is no precision parameter", func() { + It("should default to 1e-8", func() { + Ω(5.00000001).Should(BeNumerically("~", 5.00000002)) + Ω(5.00000001).ShouldNot(BeNumerically("~", 5.0000001)) + }) + }) + + Context("and there is a precision parameter", func() { + It("should use the precision parameter", func() { + Ω(5.1).Should(BeNumerically("~", 5.19, 0.1)) + Ω(5.1).Should(BeNumerically("~", 5.01, 0.1)) + Ω(5.1).ShouldNot(BeNumerically("~", 5.22, 0.1)) + Ω(5.1).ShouldNot(BeNumerically("~", 4.98, 0.1)) + }) + }) + }) + + Context("when passed an int/uint", func() { + Context("and there is no precision parameter", func() { + It("should just do strict equality", func() { + Ω(5).Should(BeNumerically("~", 5)) + Ω(5).ShouldNot(BeNumerically("~", 6)) + Ω(uint(5)).ShouldNot(BeNumerically("~", 6)) + }) + }) + + Context("and there is a precision parameter", func() { + It("should use precision paramter", func() { + Ω(5).Should(BeNumerically("~", 6, 2)) + Ω(5).ShouldNot(BeNumerically("~", 8, 2)) + Ω(uint(5)).Should(BeNumerically("~", 6, 1)) + }) + }) + }) + }) + }) + + Context("when passed a non-number", func() { + It("should error", func() { + success, err := (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{5}}).Match("foo") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeNumericallyMatcher{Comparator: "=="}).Match(5) + 
Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeNumericallyMatcher{Comparator: "~", CompareTo: []interface{}{3.0, "foo"}}).Match(5.0) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{"bar"}}).Match(5) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{"bar"}}).Match("foo") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{nil}}).Match(0) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{0}}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when passed an unsupported comparator", func() { + It("should error", func() { + success, err := (&BeNumericallyMatcher{Comparator: "!=", CompareTo: []interface{}{5}}).Match(4) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go new file mode 100644 index 00000000000..d7c32233ec4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go @@ -0,0 +1,71 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type BeSentMatcher struct { + Arg interface{} + channelClosed bool +} + +func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error) { + if !isChan(actual) { + return false, fmt.Errorf("BeSent expects a channel. Got:\n%s", format.Object(actual, 1)) + } + + channelType := reflect.TypeOf(actual) + channelValue := reflect.ValueOf(actual) + + if channelType.ChanDir() == reflect.RecvDir { + return false, fmt.Errorf("BeSent matcher cannot be passed a receive-only channel. 
Got:\n%s", format.Object(actual, 1)) + } + + argType := reflect.TypeOf(matcher.Arg) + assignable := argType.AssignableTo(channelType.Elem()) + + if !assignable { + return false, fmt.Errorf("Cannot pass:\n%s to the channel:\n%s\nThe types don't match.", format.Object(matcher.Arg, 1), format.Object(actual, 1)) + } + + argValue := reflect.ValueOf(matcher.Arg) + + defer func() { + if e := recover(); e != nil { + success = false + err = fmt.Errorf("Cannot send to a closed channel") + matcher.channelClosed = true + } + }() + + winnerIndex, _, _ := reflect.Select([]reflect.SelectCase{ + reflect.SelectCase{Dir: reflect.SelectSend, Chan: channelValue, Send: argValue}, + reflect.SelectCase{Dir: reflect.SelectDefault}, + }) + + var didSend bool + if winnerIndex == 0 { + didSend = true + } + + return didSend, nil +} + +func (matcher *BeSentMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to send:", matcher.Arg) +} + +func (matcher *BeSentMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to send:", matcher.Arg) +} + +func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + if !isChan(actual) { + return false + } + + return !matcher.channelClosed +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher_test.go new file mode 100644 index 00000000000..205d71f405f --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher_test.go @@ -0,0 +1,106 @@ +package matchers_test + +import ( + . "github.com/onsi/gomega/matchers" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("BeSent", func() { + Context("when passed a channel and a matching type", func() { + Context("when the channel is ready to receive", func() { + It("should succeed and send the value down the channel", func() { + c := make(chan string) + d := make(chan string) + go func() { + val := <-c + d <- val + }() + + time.Sleep(10 * time.Millisecond) + + Ω(c).Should(BeSent("foo")) + Eventually(d).Should(Receive(Equal("foo"))) + }) + + It("should succeed (with a buffered channel)", func() { + c := make(chan string, 1) + Ω(c).Should(BeSent("foo")) + Ω(<-c).Should(Equal("foo")) + }) + }) + + Context("when the channel is not ready to receive", func() { + It("should fail and not send down the channel", func() { + c := make(chan string) + Ω(c).ShouldNot(BeSent("foo")) + Consistently(c).ShouldNot(Receive()) + }) + }) + + Context("when the channel is eventually ready to receive", func() { + It("should succeed", func() { + c := make(chan string) + d := make(chan string) + go func() { + time.Sleep(30 * time.Millisecond) + val := <-c + d <- val + }() + + Eventually(c).Should(BeSent("foo")) + Eventually(d).Should(Receive(Equal("foo"))) + }) + }) + + Context("when the channel is closed", func() { + It("should error", func() { + c := make(chan string) + close(c) + success, err := (&BeSentMatcher{Arg: "foo"}).Match(c) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + + It("should short-circuit Eventually", func() { + c := make(chan string) + close(c) + + t := time.Now() + failures := InterceptGomegaFailures(func() { + Eventually(c, 10.0).Should(BeSent("foo")) + }) + Ω(failures).Should(HaveLen(1)) + Ω(time.Since(t)).Should(BeNumerically("<", time.Second)) + }) + }) + }) + + Context("when passed a channel and a non-matching type", func() { + It("should error", func() { + success, err := 
(&BeSentMatcher{Arg: "foo"}).Match(make(chan int, 1)) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when passed a receive-only channel", func() { + It("should error", func() { + var c <-chan string + c = make(chan string, 1) + success, err := (&BeSentMatcher{Arg: "foo"}).Match(c) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when passed a nonchannel", func() { + It("should error", func() { + success, err := (&BeSentMatcher{Arg: "foo"}).Match("bar") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go new file mode 100644 index 00000000000..abda4eb1e7b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go @@ -0,0 +1,65 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "time" +) + +type BeTemporallyMatcher struct { + Comparator string + CompareTo time.Time + Threshold []time.Duration +} + +func (matcher *BeTemporallyMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo) +} + +func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo) +} + +func (matcher *BeTemporallyMatcher) Match(actual interface{}) (bool, error) { + // predicate to test for time.Time type + isTime := func(t interface{}) bool { + _, ok := t.(time.Time) + return ok + } + + if !isTime(actual) { + return false, fmt.Errorf("Expected a time.Time. Got:\n%s", format.Object(actual, 1)) + } + + switch matcher.Comparator { + case "==", "~", ">", ">=", "<", "<=": + default: + return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator) + } + + var threshold = time.Millisecond + if len(matcher.Threshold) == 1 { + threshold = matcher.Threshold[0] + } + + return matcher.matchTimes(actual.(time.Time), matcher.CompareTo, threshold), nil +} + +func (matcher *BeTemporallyMatcher) matchTimes(actual, compareTo time.Time, threshold time.Duration) (success bool) { + switch matcher.Comparator { + case "==": + return actual.Equal(compareTo) + case "~": + diff := actual.Sub(compareTo) + return -threshold <= diff && diff <= threshold + case ">": + return actual.After(compareTo) + case ">=": + return !actual.Before(compareTo) + case "<": + return actual.Before(compareTo) + case "<=": + return !actual.After(compareTo) + } + return false +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go new file mode 100644 index 00000000000..feb33e5dc13 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go @@ -0,0 +1,98 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" + "time" +) + +var _ = Describe("BeTemporally", func() { + + var t0, t1, t2 time.Time + BeforeEach(func() { + t0 = time.Now() + t1 = t0.Add(time.Second) + t2 = t0.Add(-time.Second) + }) + + Context("When comparing times", func() { + + It("should support ==", func() { + Ω(t0).Should(BeTemporally("==", t0)) + Ω(t1).ShouldNot(BeTemporally("==", t0)) + Ω(t0).ShouldNot(BeTemporally("==", t1)) + Ω(t0).ShouldNot(BeTemporally("==", time.Time{})) + }) + + It("should support >", func() { + Ω(t0).Should(BeTemporally(">", t2)) + Ω(t0).ShouldNot(BeTemporally(">", t0)) + Ω(t2).ShouldNot(BeTemporally(">", t0)) + }) + + It("should support <", func() { + Ω(t0).Should(BeTemporally("<", t1)) + Ω(t0).ShouldNot(BeTemporally("<", t0)) + Ω(t1).ShouldNot(BeTemporally("<", t0)) + }) + + It("should support >=", func() { + Ω(t0).Should(BeTemporally(">=", t2)) + Ω(t0).Should(BeTemporally(">=", t0)) + Ω(t0).ShouldNot(BeTemporally(">=", t1)) + }) + + It("should support <=", func() { + Ω(t0).Should(BeTemporally("<=", t1)) + Ω(t0).Should(BeTemporally("<=", t0)) + Ω(t0).ShouldNot(BeTemporally("<=", t2)) + }) + + Context("when passed ~", func() { + Context("and there is no precision parameter", func() { + BeforeEach(func() { + t1 = t0.Add(time.Millisecond / 2) + t2 = t0.Add(-2 * time.Millisecond) + }) + It("should approximate", func() { + Ω(t0).Should(BeTemporally("~", t0)) + Ω(t0).Should(BeTemporally("~", t1)) + Ω(t0).ShouldNot(BeTemporally("~", t2)) + }) + }) + + Context("and there is a precision parameter", func() { + BeforeEach(func() { + t2 = t0.Add(3 * time.Second) + }) + It("should use precision paramter", func() { + d := 2 * time.Second + Ω(t0).Should(BeTemporally("~", t0, d)) + Ω(t0).Should(BeTemporally("~", t1, d)) + Ω(t0).ShouldNot(BeTemporally("~", t2, d)) + }) + }) + }) + }) + + Context("when passed a non-time", func() { + It("should error", func() { + success, err := (&BeTemporallyMatcher{Comparator: "==", CompareTo: t0}).Match("foo") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&BeTemporallyMatcher{Comparator: "=="}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when passed an unsupported comparator", func() { + It("should error", func() { + success, err := (&BeTemporallyMatcher{Comparator: "!=", CompareTo: t0}).Match(t2) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go new file mode 100644 index 00000000000..1275e5fc9d8 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go @@ -0,0 +1,25 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" +) + +type BeTrueMatcher struct { +} + +func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) { + if !isBool(actual) { + return false, fmt.Errorf("Expected a boolean. 
Got:\n%s", format.Object(actual, 1)) + } + + return actual.(bool), nil +} + +func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be true") +} + +func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be true") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher_test.go new file mode 100644 index 00000000000..ca32e56beaf --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_true_matcher_test.go @@ -0,0 +1,20 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("BeTrue", func() { + It("should handle true and false correctly", func() { + Ω(true).Should(BeTrue()) + Ω(false).ShouldNot(BeTrue()) + }) + + It("should only support booleans", func() { + success, err := (&BeTrueMatcher{}).Match("foo") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go new file mode 100644 index 00000000000..b39c9144be7 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go @@ -0,0 +1,27 @@ +package matchers + +import ( + "github.com/onsi/gomega/format" + "reflect" +) + +type BeZeroMatcher struct { +} + +func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil { + return true, nil + } + zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface() + + return reflect.DeepEqual(zeroValue, actual), nil + +} + +func (matcher *BeZeroMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be zero-valued") +} + +func (matcher *BeZeroMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be zero-valued") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher_test.go new file mode 100644 index 00000000000..8ec3643c28a --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher_test.go @@ -0,0 +1,30 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("BeZero", func() { + It("should succeed if the passed in object is the zero value for its type", func() { + Ω(nil).Should(BeZero()) + + Ω("").Should(BeZero()) + Ω(" ").ShouldNot(BeZero()) + + Ω(0).Should(BeZero()) + Ω(1).ShouldNot(BeZero()) + + Ω(0.0).Should(BeZero()) + Ω(0.1).ShouldNot(BeZero()) + + // Ω([]int{}).Should(BeZero()) + Ω([]int{1}).ShouldNot(BeZero()) + + // Ω(map[string]int{}).Should(BeZero()) + Ω(map[string]int{"a": 1}).ShouldNot(BeZero()) + + Ω(myCustomType{}).Should(BeZero()) + Ω(myCustomType{s: "a"}).ShouldNot(BeZero()) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go new file mode 100644 index 00000000000..7b0e0886842 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go @@ -0,0 +1,80 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph" +) + +type ConsistOfMatcher struct { + Elements []interface{} +} + +func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) { + if !isArrayOrSlice(actual) && !isMap(actual) { + return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1)) + } + + elements := matcher.Elements + if len(matcher.Elements) == 1 && isArrayOrSlice(matcher.Elements[0]) { + elements = []interface{}{} + value := reflect.ValueOf(matcher.Elements[0]) + for i := 0; i < value.Len(); i++ { + elements = append(elements, value.Index(i).Interface()) + } + } + + matchers := []interface{}{} + for _, element := range elements { + matcher, isMatcher := element.(omegaMatcher) + if !isMatcher { + matcher = &EqualMatcher{Expected: element} + } + matchers = append(matchers, matcher) + } + + values := matcher.valuesOf(actual) + + if len(values) != len(matchers) { + return false, nil + } + + neighbours := func(v, m interface{}) (bool, error) { + match, err := m.(omegaMatcher).Match(v) + return match && err == nil, nil + } + + bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(values, matchers, neighbours) + if err != nil { + return false, err + } + + return len(bipartiteGraph.LargestMatching()) == len(values), nil +} + +func (matcher *ConsistOfMatcher) valuesOf(actual interface{}) []interface{} { + value := reflect.ValueOf(actual) + values := []interface{}{} + if isMap(actual) { + keys := value.MapKeys() + for i := 0; i < value.Len(); i++ { + values = append(values, value.MapIndex(keys[i]).Interface()) + } + } else { + for i := 0; i < value.Len(); i++ { + values = append(values, value.Index(i).Interface()) + } + } + + return values +} + +func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to consist of", matcher.Elements) +} + +func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to consist of", matcher.Elements) +} diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of_test.go b/vendor/github.com/onsi/gomega/matchers/consist_of_test.go new file mode 100644 index 00000000000..dcd1afe947a --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/consist_of_test.go @@ -0,0 +1,75 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("ConsistOf", func() { + Context("with a slice", func() { + It("should do the right thing", func() { + Ω([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", "bar", "baz")) + Ω([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", "bar", "baz")) + Ω([]string{"foo", "bar", "baz"}).Should(ConsistOf("baz", "bar", "foo")) + Ω([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "bar", "foo", "foo")) + Ω([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "foo")) + }) + }) + + Context("with an array", func() { + It("should do the right thing", func() { + Ω([3]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", "bar", "baz")) + Ω([3]string{"foo", "bar", "baz"}).Should(ConsistOf("baz", "bar", "foo")) + Ω([3]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "bar", "foo", "foo")) + Ω([3]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "foo")) + }) + }) + + Context("with a map", func() { + It("should apply to the values", func() { + Ω(map[int]string{1: "foo", 2: "bar", 3: "baz"}).Should(ConsistOf("foo", "bar", "baz")) + Ω(map[int]string{1: "foo", 2: "bar", 3: "baz"}).Should(ConsistOf("baz", "bar", "foo")) + Ω(map[int]string{1: "foo", 2: "bar", 3: "baz"}).ShouldNot(ConsistOf("baz", "bar", "foo", "foo")) + Ω(map[int]string{1: "foo", 2: "bar", 3: "baz"}).ShouldNot(ConsistOf("baz", "foo")) + }) + + }) + + Context("with anything else", func() { + It("should error", func() { + failures := InterceptGomegaFailures(func() { + Ω("foo").Should(ConsistOf("f", "o", "o")) + }) + + Ω(failures).Should(HaveLen(1)) + }) + }) + + Context("when passed matchers", func() { + It("should pass if the matchers pass", func() { + Ω([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", MatchRegexp("^ba"), "baz")) + Ω([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("foo", MatchRegexp("^ba"))) + Ω([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("foo", MatchRegexp("^ba"), MatchRegexp("foo"))) + Ω([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", MatchRegexp("^ba"), MatchRegexp("^ba"))) + Ω([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("foo", MatchRegexp("^ba"), MatchRegexp("turducken"))) + }) + + It("should not depend on the order of the matchers", func() { + Ω([][]int{[]int{1, 2}, []int{2}}).Should(ConsistOf(ContainElement(1), ContainElement(2))) + Ω([][]int{[]int{1, 2}, []int{2}}).Should(ConsistOf(ContainElement(2), ContainElement(1))) + }) + + Context("when a matcher errors", func() { + It("should soldier on", func() { + Ω([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf(BeFalse(), "foo", "bar")) + Ω([]interface{}{"foo", "bar", false}).Should(ConsistOf(BeFalse(), ContainSubstring("foo"), "bar")) + }) + }) + }) + + Context("when passed exactly one argument, and that argument is a slice", func() { + It("should match against the elements of that argument", func() { + Ω([]string{"foo", "bar", "baz"}).Should(ConsistOf([]string{"foo", "bar", "baz"})) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go new file mode 100644 index 00000000000..4159335d0d5 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go @@ -0,0 +1,56 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type ContainElementMatcher struct { + Element interface{} +} + +func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) { + if !isArrayOrSlice(actual) && 
!isMap(actual) { + return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1)) + } + + elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher) + if !elementIsMatcher { + elemMatcher = &EqualMatcher{Expected: matcher.Element} + } + + value := reflect.ValueOf(actual) + var keys []reflect.Value + if isMap(actual) { + keys = value.MapKeys() + } + var lastError error + for i := 0; i < value.Len(); i++ { + var success bool + var err error + if isMap(actual) { + success, err = elemMatcher.Match(value.MapIndex(keys[i]).Interface()) + } else { + success, err = elemMatcher.Match(value.Index(i).Interface()) + } + if err != nil { + lastError = err + continue + } + if success { + return true, nil + } + } + + return false, lastError +} + +func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to contain element matching", matcher.Element) +} + +func (matcher *ContainElementMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to contain element matching", matcher.Element) +} diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher_test.go new file mode 100644 index 00000000000..38ee518fbce --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher_test.go @@ -0,0 +1,76 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("ContainElement", func() { + Context("when passed a supported type", func() { + Context("and expecting a non-matcher", func() { + It("should do the right thing", func() { + Ω([2]int{1, 2}).Should(ContainElement(2)) + Ω([2]int{1, 2}).ShouldNot(ContainElement(3)) + + Ω([]int{1, 2}).Should(ContainElement(2)) + Ω([]int{1, 2}).ShouldNot(ContainElement(3)) + + Ω(map[string]int{"foo": 1, "bar": 2}).Should(ContainElement(2)) + Ω(map[int]int{3: 1, 4: 2}).ShouldNot(ContainElement(3)) + + arr := make([]myCustomType, 2) + arr[0] = myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}} + arr[1] = myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "c"}} + Ω(arr).Should(ContainElement(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}})) + Ω(arr).ShouldNot(ContainElement(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"b", "c"}})) + }) + }) + + Context("and expecting a matcher", func() { + It("should pass each element through the matcher", func() { + Ω([]int{1, 2, 3}).Should(ContainElement(BeNumerically(">=", 3))) + Ω([]int{1, 2, 3}).ShouldNot(ContainElement(BeNumerically(">", 3))) + Ω(map[string]int{"foo": 1, "bar": 2}).Should(ContainElement(BeNumerically(">=", 2))) + Ω(map[string]int{"foo": 1, "bar": 2}).ShouldNot(ContainElement(BeNumerically(">", 2))) + }) + + It("should power through even if the matcher ever fails", func() { + Ω([]interface{}{1, 2, "3", 4}).Should(ContainElement(BeNumerically(">=", 3))) + }) + + It("should fail if the matcher fails", func() { + actual := []interface{}{1, 2, "3", "4"} + success, err := (&ContainElementMatcher{Element: BeNumerically(">=", 3)}).Match(actual) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + }) + + Context("when passed a correctly typed nil", func() { + It("should operate succesfully on the passed in value", func() { + var nilSlice []int + Ω(nilSlice).ShouldNot(ContainElement(1)) + + var nilMap 
map[int]string + Ω(nilMap).ShouldNot(ContainElement("foo")) + }) + }) + + Context("when passed an unsupported type", func() { + It("should error", func() { + success, err := (&ContainElementMatcher{Element: 0}).Match(0) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&ContainElementMatcher{Element: 0}).Match("abc") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&ContainElementMatcher{Element: 0}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go new file mode 100644 index 00000000000..2e7608921ac --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go @@ -0,0 +1,37 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "strings" +) + +type ContainSubstringMatcher struct { + Substr string + Args []interface{} +} + +func (matcher *ContainSubstringMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("ContainSubstring matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) + } + + return strings.Contains(actualString, matcher.stringToMatch()), nil +} + +func (matcher *ContainSubstringMatcher) stringToMatch() string { + stringToMatch := matcher.Substr + if len(matcher.Args) > 0 { + stringToMatch = fmt.Sprintf(matcher.Substr, matcher.Args...) + } + return stringToMatch +} + +func (matcher *ContainSubstringMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to contain substring", matcher.stringToMatch()) +} + +func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to contain substring", matcher.stringToMatch()) +} diff --git a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher_test.go new file mode 100644 index 00000000000..6935168e5c0 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher_test.go @@ -0,0 +1,36 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("ContainSubstringMatcher", func() { + Context("when actual is a string", func() { + It("should match against the string", func() { + Ω("Marvelous").Should(ContainSubstring("rve")) + Ω("Marvelous").ShouldNot(ContainSubstring("boo")) + }) + }) + + Context("when the matcher is called with multiple arguments", func() { + It("should pass the string and arguments to sprintf", func() { + Ω("Marvelous3").Should(ContainSubstring("velous%d", 3)) + }) + }) + + Context("when actual is a stringer", func() { + It("should call the stringer and match agains the returned string", func() { + Ω(&myStringer{a: "Abc3"}).Should(ContainSubstring("bc3")) + }) + }) + + Context("when actual is neither a string nor a stringer", func() { + It("should error", func() { + success, err := (&ContainSubstringMatcher{Substr: "2"}).Match(2) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/equal_matcher.go b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go new file mode 100644 index 00000000000..d186597379b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go @@ -0,0 +1,27 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type EqualMatcher struct { + Expected interface{} +} + +func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil && matcher.Expected == nil { + return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") + } + return reflect.DeepEqual(actual, matcher.Expected), nil +} + +func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to equal", matcher.Expected) +} + +func (matcher *EqualMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to equal", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/equal_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/equal_matcher_test.go new file mode 100644 index 00000000000..ef0d137dda4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/equal_matcher_test.go @@ -0,0 +1,44 @@ +package matchers_test + +import ( + "errors" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("Equal", func() { + Context("when asserting that nil equals nil", func() { + It("should error", func() { + success, err := (&EqualMatcher{Expected: nil}).Match(nil) + + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("When asserting equality between objects", func() { + It("should do the right thing", func() { + Ω(5).Should(Equal(5)) + Ω(5.0).Should(Equal(5.0)) + + Ω(5).ShouldNot(Equal("5")) + Ω(5).ShouldNot(Equal(5.0)) + Ω(5).ShouldNot(Equal(3)) + + Ω("5").Should(Equal("5")) + Ω([]int{1, 2}).Should(Equal([]int{1, 2})) + Ω([]int{1, 2}).ShouldNot(Equal([]int{2, 1})) + Ω(map[string]string{"a": "b", "c": "d"}).Should(Equal(map[string]string{"a": "b", "c": "d"})) + Ω(map[string]string{"a": "b", "c": "d"}).ShouldNot(Equal(map[string]string{"a": "b", "c": "e"})) + Ω(errors.New("foo")).Should(Equal(errors.New("foo"))) + Ω(errors.New("foo")).ShouldNot(Equal(errors.New("bar"))) + + Ω(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).Should(Equal(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}})) + Ω(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "bar", n: 3, f: 2.0, arr: []string{"a", "b"}})) + Ω(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "foo", n: 2, f: 2.0, arr: []string{"a", "b"}})) + Ω(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "foo", n: 3, f: 3.0, arr: []string{"a", "b"}})) + Ω(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b", "c"}})) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go new file mode 100644 index 00000000000..7ace93dc36d --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go @@ -0,0 +1,28 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HaveCapMatcher struct { + Count int +} + +func (matcher *HaveCapMatcher) Match(actual interface{}) (success bool, err error) { + length, ok := capOf(actual) + if !ok { + return false, fmt.Errorf("HaveCap matcher expects a array/channel/slice. Got:\n%s", format.Object(actual, 1)) + } + + return length == matcher.Count, nil +} + +func (matcher *HaveCapMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nto have capacity %d", format.Object(actual, 1), matcher.Count) +} + +func (matcher *HaveCapMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nnot to have capacity %d", format.Object(actual, 1), matcher.Count) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher_test.go new file mode 100644 index 00000000000..a92a177b584 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher_test.go @@ -0,0 +1,50 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("HaveCap", func() { + Context("when passed a supported type", func() { + It("should do the right thing", func() { + Ω([0]int{}).Should(HaveCap(0)) + Ω([2]int{1}).Should(HaveCap(2)) + + Ω([]int{}).Should(HaveCap(0)) + Ω([]int{1, 2, 3, 4, 5}[:2]).Should(HaveCap(5)) + Ω(make([]int, 0, 5)).Should(HaveCap(5)) + + c := make(chan bool, 3) + Ω(c).Should(HaveCap(3)) + c <- true + c <- true + Ω(c).Should(HaveCap(3)) + + Ω(make(chan bool)).Should(HaveCap(0)) + }) + }) + + Context("when passed a correctly typed nil", func() { + It("should operate succesfully on the passed in value", func() { + var nilSlice []int + Ω(nilSlice).Should(HaveCap(0)) + + var nilChan chan int + Ω(nilChan).Should(HaveCap(0)) + }) + }) + + Context("when passed an unsupported type", func() { + It("should error", func() { + success, err := (&HaveCapMatcher{Count: 0}).Match(0) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&HaveCapMatcher{Count: 0}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go new file mode 100644 index 00000000000..5701ba6e24c --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go @@ -0,0 +1,53 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "reflect" +) + +type HaveKeyMatcher struct { + Key interface{} +} + +func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) { + if !isMap(actual) { + return false, fmt.Errorf("HaveKey matcher expects a map. Got:%s", format.Object(actual, 1)) + } + + keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) + if !keyIsMatcher { + keyMatcher = &EqualMatcher{Expected: matcher.Key} + } + + keys := reflect.ValueOf(actual).MapKeys() + for i := 0; i < len(keys); i++ { + success, err := keyMatcher.Match(keys[i].Interface()) + if err != nil { + return false, fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error()) + } + if success { + return true, nil + } + } + + return false, nil +} + +func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message string) { + switch matcher.Key.(type) { + case omegaMatcher: + return format.Message(actual, "to have key matching", matcher.Key) + default: + return format.Message(actual, "to have key", matcher.Key) + } +} + +func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + switch matcher.Key.(type) { + case omegaMatcher: + return format.Message(actual, "not to have key matching", matcher.Key) + default: + return format.Message(actual, "not to have key", matcher.Key) + } +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher_test.go new file mode 100644 index 00000000000..c663e302bac --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher_test.go @@ -0,0 +1,73 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("HaveKey", func() { + var ( + stringKeys map[string]int + intKeys map[int]string + objKeys map[*myCustomType]string + + customA *myCustomType + customB *myCustomType + ) + BeforeEach(func() { + stringKeys = map[string]int{"foo": 2, "bar": 3} + intKeys = map[int]string{2: "foo", 3: "bar"} + + customA = &myCustomType{s: "a", n: 2, f: 2.3, arr: []string{"ice", "cream"}} + customB = &myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}} + objKeys = map[*myCustomType]string{customA: "aardvark", customB: "kangaroo"} + }) + + Context("when passed a map", func() { + It("should do the right thing", func() { + Ω(stringKeys).Should(HaveKey("foo")) + Ω(stringKeys).ShouldNot(HaveKey("baz")) + + Ω(intKeys).Should(HaveKey(2)) + Ω(intKeys).ShouldNot(HaveKey(4)) + + Ω(objKeys).Should(HaveKey(customA)) + Ω(objKeys).Should(HaveKey(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}})) + Ω(objKeys).ShouldNot(HaveKey(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"apple", "pie"}})) + }) + }) + + Context("when passed a correctly typed nil", func() { + It("should operate succesfully on the passed in value", func() { + var nilMap map[int]string + Ω(nilMap).ShouldNot(HaveKey("foo")) + }) + }) + + Context("when the passed in key is actually a matcher", func() { + It("should pass each element through the matcher", func() { + Ω(stringKeys).Should(HaveKey(ContainSubstring("oo"))) + Ω(stringKeys).ShouldNot(HaveKey(ContainSubstring("foobar"))) + }) + + It("should fail if the matcher ever fails", func() { + actual := map[int]string{1: "a", 3: "b", 2: "c"} + success, err := (&HaveKeyMatcher{Key: ContainSubstring("ar")}).Match(actual) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when passed something that is not a map", func() { + It("should error", func() { + success, err := (&HaveKeyMatcher{Key: "foo"}).Match([]string{"foo"}) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&HaveKeyMatcher{Key: "foo"}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go new file mode 100644 index 00000000000..464ac187e90 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go @@ -0,0 +1,73 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "reflect" +) + +type HaveKeyWithValueMatcher struct { + Key interface{} + Value interface{} +} + +func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) { + if !isMap(actual) { + return false, fmt.Errorf("HaveKeyWithValue matcher expects a map. 
Got:%s", format.Object(actual, 1)) + } + + keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) + if !keyIsMatcher { + keyMatcher = &EqualMatcher{Expected: matcher.Key} + } + + valueMatcher, valueIsMatcher := matcher.Value.(omegaMatcher) + if !valueIsMatcher { + valueMatcher = &EqualMatcher{Expected: matcher.Value} + } + + keys := reflect.ValueOf(actual).MapKeys() + for i := 0; i < len(keys); i++ { + success, err := keyMatcher.Match(keys[i].Interface()) + if err != nil { + return false, fmt.Errorf("HaveKeyWithValue's key matcher failed with:\n%s%s", format.Indent, err.Error()) + } + if success { + actualValue := reflect.ValueOf(actual).MapIndex(keys[i]) + success, err := valueMatcher.Match(actualValue.Interface()) + if err != nil { + return false, fmt.Errorf("HaveKeyWithValue's value matcher failed with:\n%s%s", format.Indent, err.Error()) + } + return success, nil + } + } + + return false, nil +} + +func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (message string) { + str := "to have {key: value}" + if _, ok := matcher.Key.(omegaMatcher); ok { + str += " matching" + } else if _, ok := matcher.Value.(omegaMatcher); ok { + str += " matching" + } + + expect := make(map[interface{}]interface{}, 1) + expect[matcher.Key] = matcher.Value + return format.Message(actual, str, expect) +} + +func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) { + kStr := "not to have key" + if _, ok := matcher.Key.(omegaMatcher); ok { + kStr = "not to have key matching" + } + + vStr := "or that key's value not be" + if _, ok := matcher.Value.(omegaMatcher); ok { + vStr = "or to have that key's value not matching" + } + + return format.Message(actual, kStr, matcher.Key, vStr, matcher.Value) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher_test.go new file mode 100644 index 00000000000..06a2242aeca --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher_test.go @@ -0,0 +1,82 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("HaveKeyWithValue", func() { + var ( + stringKeys map[string]int + intKeys map[int]string + objKeys map[*myCustomType]*myCustomType + + customA *myCustomType + customB *myCustomType + ) + BeforeEach(func() { + stringKeys = map[string]int{"foo": 2, "bar": 3} + intKeys = map[int]string{2: "foo", 3: "bar"} + + customA = &myCustomType{s: "a", n: 2, f: 2.3, arr: []string{"ice", "cream"}} + customB = &myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}} + objKeys = map[*myCustomType]*myCustomType{customA: customA, customB: customA} + }) + + Context("when passed a map", func() { + It("should do the right thing", func() { + Ω(stringKeys).Should(HaveKeyWithValue("foo", 2)) + Ω(stringKeys).ShouldNot(HaveKeyWithValue("foo", 1)) + Ω(stringKeys).ShouldNot(HaveKeyWithValue("baz", 2)) + Ω(stringKeys).ShouldNot(HaveKeyWithValue("baz", 1)) + + Ω(intKeys).Should(HaveKeyWithValue(2, "foo")) + Ω(intKeys).ShouldNot(HaveKeyWithValue(4, "foo")) + Ω(intKeys).ShouldNot(HaveKeyWithValue(2, "baz")) + + Ω(objKeys).Should(HaveKeyWithValue(customA, customA)) + Ω(objKeys).Should(HaveKeyWithValue(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}}, &myCustomType{s: "a", n: 2, f: 2.3, arr: []string{"ice", "cream"}})) + Ω(objKeys).ShouldNot(HaveKeyWithValue(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"apple", "pie"}}, customA)) + }) + }) + + Context("when passed a correctly typed nil", func() { + It("should operate succesfully on the passed in value", func() { + var nilMap map[int]string + Ω(nilMap).ShouldNot(HaveKeyWithValue("foo", "bar")) + }) + }) + + Context("when the passed in key or value is actually a matcher", func() { + It("should pass each element through the matcher", func() { + Ω(stringKeys).Should(HaveKeyWithValue(ContainSubstring("oo"), 2)) + Ω(intKeys).Should(HaveKeyWithValue(2, ContainSubstring("oo"))) + Ω(stringKeys).ShouldNot(HaveKeyWithValue(ContainSubstring("foobar"), 2)) + }) + + It("should fail if the matcher ever fails", func() { + actual := map[int]string{1: "a", 3: "b", 2: "c"} + success, err := (&HaveKeyWithValueMatcher{Key: ContainSubstring("ar"), Value: 2}).Match(actual) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + otherActual := map[string]int{"a": 1, "b": 2, "c": 3} + success, err = (&HaveKeyWithValueMatcher{Key: "a", Value: ContainSubstring("1")}).Match(otherActual) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when passed something that is not a map", func() { + It("should error", func() { + success, err := (&HaveKeyWithValueMatcher{Key: "foo", Value: "bar"}).Match([]string{"foo"}) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&HaveKeyWithValueMatcher{Key: "foo", Value: "bar"}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go new file mode 100644 index 00000000000..a1837755701 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go @@ -0,0 +1,27 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" +) + +type HaveLenMatcher struct { + Count int +} + +func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) { + length, ok := lengthOf(actual) + if !ok { + return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice. 
Got:\n%s", format.Object(actual, 1)) + } + + return length == matcher.Count, nil +} + +func (matcher *HaveLenMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nto have length %d", format.Object(actual, 1), matcher.Count) +} + +func (matcher *HaveLenMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nnot to have length %d", format.Object(actual, 1), matcher.Count) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_len_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher_test.go new file mode 100644 index 00000000000..1e6aa69d9d1 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_len_matcher_test.go @@ -0,0 +1,53 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("HaveLen", func() { + Context("when passed a supported type", func() { + It("should do the right thing", func() { + Ω("").Should(HaveLen(0)) + Ω("AA").Should(HaveLen(2)) + + Ω([0]int{}).Should(HaveLen(0)) + Ω([2]int{1, 2}).Should(HaveLen(2)) + + Ω([]int{}).Should(HaveLen(0)) + Ω([]int{1, 2, 3}).Should(HaveLen(3)) + + Ω(map[string]int{}).Should(HaveLen(0)) + Ω(map[string]int{"a": 1, "b": 2, "c": 3, "d": 4}).Should(HaveLen(4)) + + c := make(chan bool, 3) + Ω(c).Should(HaveLen(0)) + c <- true + c <- true + Ω(c).Should(HaveLen(2)) + }) + }) + + Context("when passed a correctly typed nil", func() { + It("should operate succesfully on the passed in value", func() { + var nilSlice []int + Ω(nilSlice).Should(HaveLen(0)) + + var nilMap map[int]string + Ω(nilMap).Should(HaveLen(0)) + }) + }) + + Context("when passed an unsupported type", func() { + It("should error", func() { + success, err := (&HaveLenMatcher{Count: 0}).Match(0) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&HaveLenMatcher{Count: 0}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go new file mode 100644 index 00000000000..ebdd71786d8 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go @@ -0,0 +1,33 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HaveOccurredMatcher struct { +} + +func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err error) { + // is purely nil? + if actual == nil { + return false, nil + } + + // must be an 'error' type + if !isError(actual) { + return false, fmt.Errorf("Expected an error-type. Got:\n%s", format.Object(actual, 1)) + } + + // must be non-nil (or a pointer to a non-nil) + return !isNil(actual), nil +} + +func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected an error to have occurred. 
Got:\n%s", format.Object(actual, 1)) +} + +func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected error:\n%s\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1), "not to have occurred") +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go new file mode 100644 index 00000000000..009e23e5fce --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go @@ -0,0 +1,58 @@ +package matchers_test + +import ( + "errors" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +type CustomErr struct { + msg string +} + +func (e *CustomErr) Error() string { + return e.msg +} + +var _ = Describe("HaveOccurred", func() { + It("should succeed if matching an error", func() { + Ω(errors.New("Foo")).Should(HaveOccurred()) + }) + + It("should not succeed with nil", func() { + Ω(nil).ShouldNot(HaveOccurred()) + }) + + It("should only support errors and nil", func() { + success, err := (&HaveOccurredMatcher{}).Match("foo") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&HaveOccurredMatcher{}).Match("") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + + It("doesn't support non-error type", func() { + success, err := (&HaveOccurredMatcher{}).Match(AnyType{}) + Ω(success).Should(BeFalse()) + Ω(err).Should(MatchError("Expected an error-type. Got:\n : {}")) + }) + + It("doesn't support non-error pointer type", func() { + success, err := (&HaveOccurredMatcher{}).Match(&AnyType{}) + Ω(success).Should(BeFalse()) + Ω(err).Should(MatchError(MatchRegexp(`Expected an error-type. Got:\n <*matchers_test.AnyType | 0x[[:xdigit:]]+>: {}`))) + }) + + It("should succeed with pointer types that conform to error interface", func() { + err := &CustomErr{"ohai"} + Ω(err).Should(HaveOccurred()) + }) + + It("should not succeed with nil pointers to types that conform to error interface", func() { + var err *CustomErr = nil + Ω(err).ShouldNot(HaveOccurred()) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go new file mode 100644 index 00000000000..8b63a89997b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go @@ -0,0 +1,35 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" +) + +type HavePrefixMatcher struct { + Prefix string + Args []interface{} +} + +func (matcher *HavePrefixMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("HavePrefix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) + } + prefix := matcher.prefix() + return len(actualString) >= len(prefix) && actualString[0:len(prefix)] == prefix, nil +} + +func (matcher *HavePrefixMatcher) prefix() string { + if len(matcher.Args) > 0 { + return fmt.Sprintf(matcher.Prefix, matcher.Args...) 
+ } + return matcher.Prefix +} + +func (matcher *HavePrefixMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to have prefix", matcher.prefix()) +} + +func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to have prefix", matcher.prefix()) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher_test.go new file mode 100644 index 00000000000..bec3f975827 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher_test.go @@ -0,0 +1,36 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("HavePrefixMatcher", func() { + Context("when actual is a string", func() { + It("should match a string prefix", func() { + Ω("Ab").Should(HavePrefix("A")) + Ω("A").ShouldNot(HavePrefix("Ab")) + }) + }) + + Context("when the matcher is called with multiple arguments", func() { + It("should pass the string and arguments to sprintf", func() { + Ω("C3PO").Should(HavePrefix("C%dP", 3)) + }) + }) + + Context("when actual is a stringer", func() { + It("should call the stringer and match against the returned string", func() { + Ω(&myStringer{a: "Ab"}).Should(HavePrefix("A")) + }) + }) + + Context("when actual is neither a string nor a stringer", func() { + It("should error", func() { + success, err := (&HavePrefixMatcher{Prefix: "2"}).Match(2) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go new file mode 100644 index 00000000000..afc78fc9019 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go @@ -0,0 +1,35 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" +) + +type HaveSuffixMatcher struct { + Suffix string + Args []interface{} +} + +func (matcher *HaveSuffixMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("HaveSuffix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) + } + suffix := matcher.suffix() + return len(actualString) >= len(suffix) && actualString[len(actualString)-len(suffix):] == suffix, nil +} + +func (matcher *HaveSuffixMatcher) suffix() string { + if len(matcher.Args) > 0 { + return fmt.Sprintf(matcher.Suffix, matcher.Args...) + } + return matcher.Suffix +} + +func (matcher *HaveSuffixMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to have suffix", matcher.suffix()) +} + +func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to have suffix", matcher.suffix()) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher_test.go new file mode 100644 index 00000000000..72e8975bae0 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher_test.go @@ -0,0 +1,36 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("HaveSuffixMatcher", func() { + Context("when actual is a string", func() { + It("should match a string suffix", func() { + Ω("Ab").Should(HaveSuffix("b")) + Ω("A").ShouldNot(HaveSuffix("Ab")) + }) + }) + + Context("when the matcher is called with multiple arguments", func() { + It("should pass the string and arguments to sprintf", func() { + Ω("C3PO").Should(HaveSuffix("%dPO", 3)) + }) + }) + + Context("when actual is a stringer", func() { + It("should call the stringer and match against the returned string", func() { + Ω(&myStringer{a: "Ab"}).Should(HaveSuffix("b")) + }) + }) + + Context("when actual is neither a string nor a stringer", func() { + It("should error", func() { + success, err := (&HaveSuffixMatcher{Suffix: "2"}).Match(2) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go new file mode 100644 index 00000000000..03cdf045888 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go @@ -0,0 +1,50 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "reflect" +) + +type MatchErrorMatcher struct { + Expected interface{} +} + +func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err error) { + if isNil(actual) { + return false, fmt.Errorf("Expected an error, got nil") + } + + if !isError(actual) { + return false, fmt.Errorf("Expected an error. Got:\n%s", format.Object(actual, 1)) + } + + actualErr := actual.(error) + + if isString(matcher.Expected) { + return reflect.DeepEqual(actualErr.Error(), matcher.Expected), nil + } + + if isError(matcher.Expected) { + return reflect.DeepEqual(actualErr, matcher.Expected), nil + } + + var subMatcher omegaMatcher + var hasSubMatcher bool + if matcher.Expected != nil { + subMatcher, hasSubMatcher = (matcher.Expected).(omegaMatcher) + if hasSubMatcher { + return subMatcher.Match(actualErr.Error()) + } + } + + return false, fmt.Errorf("MatchError must be passed an error, string, or Matcher that can match on strings. Got:\n%s", format.Object(matcher.Expected, 1)) +} + +func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to match error", matcher.Expected) +} + +func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to match error", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher_test.go new file mode 100644 index 00000000000..338b5129543 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher_test.go @@ -0,0 +1,93 @@ +package matchers_test + +import ( + "errors" + "fmt" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +type CustomError struct { +} + +func (c CustomError) Error() string { + return "an error" +} + +var _ = Describe("MatchErrorMatcher", func() { + Context("When asserting against an error", func() { + It("should succeed when matching with an error", func() { + err := errors.New("an error") + fmtErr := fmt.Errorf("an error") + customErr := CustomError{} + + Ω(err).Should(MatchError(errors.New("an error"))) + Ω(err).ShouldNot(MatchError(errors.New("another error"))) + + Ω(fmtErr).Should(MatchError(errors.New("an error"))) + Ω(customErr).Should(MatchError(CustomError{})) + }) + + It("should succeed when matching with a string", func() { + err := errors.New("an error") + fmtErr := fmt.Errorf("an error") + customErr := CustomError{} + + Ω(err).Should(MatchError("an error")) + Ω(err).ShouldNot(MatchError("another error")) + + Ω(fmtErr).Should(MatchError("an error")) + Ω(customErr).Should(MatchError("an error")) + }) + + Context("when passed a matcher", func() { + It("should pass if the matcher passes against the error string", func() { + err := errors.New("error 123 abc") + + Ω(err).Should(MatchError(MatchRegexp(`\d{3}`))) + }) + + It("should fail if the matcher fails against the error string", func() { + err := errors.New("no digits") + Ω(err).ShouldNot(MatchError(MatchRegexp(`\d`))) + }) + }) + + It("should fail when passed anything else", func() { + actualErr := errors.New("an error") + _, err := (&MatchErrorMatcher{ + Expected: []byte("an error"), + }).Match(actualErr) + Ω(err).Should(HaveOccurred()) + + _, err = (&MatchErrorMatcher{ + Expected: 3, + }).Match(actualErr) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when passed nil", func() { + It("should fail", func() { + _, err := (&MatchErrorMatcher{ + Expected: "an error", + }).Match(nil) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when passed a non-error", func() { + It("should fail", func() { + _, err := (&MatchErrorMatcher{ + Expected: "an error", + }).Match("an error") + Ω(err).Should(HaveOccurred()) + + _, err = (&MatchErrorMatcher{ + Expected: "an error", + }).Match(3) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go new file mode 100644 index 00000000000..e61978a1de8 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go @@ -0,0 +1,64 @@ +package matchers + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type MatchJSONMatcher struct { + JSONToMatch interface{} +} + +func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err error) { + actualString, expectedString, err := matcher.prettyPrint(actual) + if err != nil { + return false, err + } + + var aval interface{} + var eval interface{} + + // this is guarded by prettyPrint + json.Unmarshal([]byte(actualString), &aval) + json.Unmarshal([]byte(expectedString), &eval) + + return reflect.DeepEqual(aval, eval), nil +} + +func (matcher *MatchJSONMatcher) FailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.prettyPrint(actual) + return format.Message(actualString, "to match JSON of", expectedString) +} + +func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.prettyPrint(actual) + return format.Message(actualString, "not to match JSON of", expectedString) +} + +func (matcher 
*MatchJSONMatcher) prettyPrint(actual interface{}) (actualFormatted, expectedFormatted string, err error) { + actualString, ok := toString(actual) + if !ok { + return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1)) + } + expectedString, ok := toString(matcher.JSONToMatch) + if !ok { + return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got expected:\n%s", format.Object(matcher.JSONToMatch, 1)) + } + + abuf := new(bytes.Buffer) + ebuf := new(bytes.Buffer) + + if err := json.Indent(abuf, []byte(actualString), "", " "); err != nil { + return "", "", fmt.Errorf("Actual '%s' should be valid JSON, but it is not.\nUnderlying error:%s", actualString, err) + } + + if err := json.Indent(ebuf, []byte(expectedString), "", " "); err != nil { + return "", "", fmt.Errorf("Expected '%s' should be valid JSON, but it is not.\nUnderlying error:%s", expectedString, err) + } + + return abuf.String(), ebuf.String(), nil +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_json_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_json_matcher_test.go new file mode 100644 index 00000000000..755c4ad8270 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_json_matcher_test.go @@ -0,0 +1,73 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("MatchJSONMatcher", func() { + Context("When passed stringifiables", func() { + It("should succeed if the JSON matches", func() { + Ω("{}").Should(MatchJSON("{}")) + Ω(`{"a":1}`).Should(MatchJSON(`{"a":1}`)) + Ω(`{ + "a":1 + }`).Should(MatchJSON(`{"a":1}`)) + Ω(`{"a":1, "b":2}`).Should(MatchJSON(`{"b":2, "a":1}`)) + Ω(`{"a":1}`).ShouldNot(MatchJSON(`{"b":2, "a":1}`)) + }) + + It("should work with byte arrays", func() { + Ω([]byte("{}")).Should(MatchJSON([]byte("{}"))) + Ω("{}").Should(MatchJSON([]byte("{}"))) + Ω([]byte("{}")).Should(MatchJSON("{}")) + }) + }) + + Context("when the expected is not valid JSON", func() { + It("should error and explain why", func() { + success, err := (&MatchJSONMatcher{JSONToMatch: `{}`}).Match(`oops`) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + Ω(err.Error()).Should(ContainSubstring("Actual 'oops' should be valid JSON")) + }) + }) + + Context("when the actual is not valid JSON", func() { + It("should error and explain why", func() { + success, err := (&MatchJSONMatcher{JSONToMatch: `oops`}).Match(`{}`) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + Ω(err.Error()).Should(ContainSubstring("Expected 'oops' should be valid JSON")) + }) + }) + + Context("when the expected is neither a string nor a stringer nor a byte array", func() { + It("should error", func() { + success, err := (&MatchJSONMatcher{JSONToMatch: 2}).Match("{}") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + Ω(err.Error()).Should(ContainSubstring("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got expected:\n : 2")) + + success, err = (&MatchJSONMatcher{JSONToMatch: nil}).Match("{}") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + Ω(err.Error()).Should(ContainSubstring("MatchJSONMatcher matcher requires a string, stringer, or []byte. 
Got expected:\n : nil")) + }) + }) + + Context("when the actual is neither a string nor a stringer nor a byte array", func() { + It("should error", func() { + success, err := (&MatchJSONMatcher{JSONToMatch: "{}"}).Match(2) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + Ω(err.Error()).Should(ContainSubstring("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got actual:\n : 2")) + + success, err = (&MatchJSONMatcher{JSONToMatch: "{}"}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + Ω(err.Error()).Should(ContainSubstring("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got actual:\n : nil")) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go new file mode 100644 index 00000000000..7ca79a15be2 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go @@ -0,0 +1,42 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "regexp" +) + +type MatchRegexpMatcher struct { + Regexp string + Args []interface{} +} + +func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("RegExp matcher requires a string or stringer.\nGot:%s", format.Object(actual, 1)) + } + + match, err := regexp.Match(matcher.regexp(), []byte(actualString)) + if err != nil { + return false, fmt.Errorf("RegExp match failed to compile with error:\n\t%s", err.Error()) + } + + return match, nil +} + +func (matcher *MatchRegexpMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to match regular expression", matcher.regexp()) +} + +func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to match regular expression", matcher.regexp()) +} + +func (matcher *MatchRegexpMatcher) regexp() string { + re := matcher.Regexp + if len(matcher.Args) > 0 { + re = fmt.Sprintf(matcher.Regexp, matcher.Args...) + } + return re +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher_test.go new file mode 100644 index 00000000000..bb521cce347 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher_test.go @@ -0,0 +1,44 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("MatchRegexp", func() { + Context("when actual is a string", func() { + It("should match against the string", func() { + Ω(" a2!bla").Should(MatchRegexp(`\d!`)) + Ω(" a2!bla").ShouldNot(MatchRegexp(`[A-Z]`)) + }) + }) + + Context("when actual is a stringer", func() { + It("should call the stringer and match agains the returned string", func() { + Ω(&myStringer{a: "Abc3"}).Should(MatchRegexp(`[A-Z][a-z]+\d`)) + }) + }) + + Context("when the matcher is called with multiple arguments", func() { + It("should pass the string and arguments to sprintf", func() { + Ω(" a23!bla").Should(MatchRegexp(`\d%d!`, 3)) + }) + }) + + Context("when actual is neither a string nor a stringer", func() { + It("should error", func() { + success, err := (&MatchRegexpMatcher{Regexp: `\d`}).Match(2) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when the passed in regexp fails to compile", func() { + It("should error", func() { + success, err := (&MatchRegexpMatcher{Regexp: "("}).Match("Foo") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go new file mode 100644 index 00000000000..69fb51a8592 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go @@ -0,0 +1,74 @@ +package matchers + +import ( + "fmt" + "reflect" + "strings" + + "github.com/onsi/gomega/format" + "gopkg.in/yaml.v2" +) + +type MatchYAMLMatcher struct { + YAMLToMatch interface{} +} + +func (matcher *MatchYAMLMatcher) Match(actual interface{}) (success bool, err error) { + actualString, expectedString, err := matcher.toStrings(actual) + if err != nil { + return false, err + } + + var aval interface{} + var eval interface{} + + if err := yaml.Unmarshal([]byte(actualString), &aval); err != nil { + return false, fmt.Errorf("Actual '%s' should be valid YAML, but it is not.\nUnderlying error:%s", actualString, err) + } + if err := yaml.Unmarshal([]byte(expectedString), &eval); err != nil { + return false, fmt.Errorf("Expected '%s' should be valid YAML, but it is not.\nUnderlying error:%s", expectedString, err) + } + + return reflect.DeepEqual(aval, eval), nil +} + +func (matcher *MatchYAMLMatcher) FailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.toNormalisedStrings(actual) + return format.Message(actualString, "to match YAML of", expectedString) +} + +func (matcher *MatchYAMLMatcher) NegatedFailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.toNormalisedStrings(actual) + return format.Message(actualString, "not to match YAML of", expectedString) +} + +func (matcher *MatchYAMLMatcher) toNormalisedStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) { + actualString, expectedString, err := matcher.toStrings(actual) + return normalise(actualString), normalise(expectedString), err +} + +func normalise(input string) string { + var val interface{} + err := yaml.Unmarshal([]byte(input), &val) + if err != nil { + panic(err) // guarded by Match + } + output, err := yaml.Marshal(val) + if err != nil { + panic(err) // guarded by Unmarshal + } + return strings.TrimSpace(string(output)) +} + +func (matcher *MatchYAMLMatcher) toStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) { + actualString, ok := toString(actual) + if !ok { + 
return "", "", fmt.Errorf("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1)) + } + expectedString, ok := toString(matcher.YAMLToMatch) + if !ok { + return "", "", fmt.Errorf("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n%s", format.Object(matcher.YAMLToMatch, 1)) + } + + return actualString, expectedString, nil +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher_test.go new file mode 100644 index 00000000000..8e63de19e35 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher_test.go @@ -0,0 +1,94 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("MatchYAMLMatcher", func() { + Context("When passed stringifiables", func() { + It("should succeed if the YAML matches", func() { + Expect("---").Should(MatchYAML("")) + Expect("a: 1").Should(MatchYAML(`{"a":1}`)) + Expect("a: 1\nb: 2").Should(MatchYAML(`{"b":2, "a":1}`)) + }) + + It("should explain if the YAML does not match when it should", func() { + message := (&MatchYAMLMatcher{YAMLToMatch: "a: 1"}).FailureMessage("b: 2") + Expect(message).To(MatchRegexp(`Expected\s+: b: 2\s+to match YAML of\s+: a: 1`)) + }) + + It("should normalise the expected and actual when explaining if the YAML does not match when it should", func() { + message := (&MatchYAMLMatcher{YAMLToMatch: "a: 'one'"}).FailureMessage("{b: two}") + Expect(message).To(MatchRegexp(`Expected\s+: b: two\s+to match YAML of\s+: a: one`)) + }) + + It("should explain if the YAML matches when it should not", func() { + message := (&MatchYAMLMatcher{YAMLToMatch: "a: 1"}).NegatedFailureMessage("a: 1") + Expect(message).To(MatchRegexp(`Expected\s+: a: 1\s+not to match YAML of\s+: a: 1`)) + }) + + It("should normalise the expected and actual when explaining if the YAML matches when it should not", func() { + message := (&MatchYAMLMatcher{YAMLToMatch: "a: 'one'"}).NegatedFailureMessage("{a: one}") + Expect(message).To(MatchRegexp(`Expected\s+: a: one\s+not to match YAML of\s+: a: one`)) + }) + + It("should fail if the YAML does not match", func() { + Expect("a: 1").ShouldNot(MatchYAML(`{"b":2, "a":1}`)) + }) + + It("should work with byte arrays", func() { + Expect([]byte("a: 1")).Should(MatchYAML([]byte("a: 1"))) + Expect("a: 1").Should(MatchYAML([]byte("a: 1"))) + Expect([]byte("a: 1")).Should(MatchYAML("a: 1")) + }) + }) + + Context("when the expected is not valid YAML", func() { + It("should error and explain why", func() { + success, err := (&MatchYAMLMatcher{YAMLToMatch: ""}).Match("good:\nbad") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("Actual 'good:\nbad' should be valid YAML")) + }) + }) + + Context("when the actual is not valid YAML", func() { + It("should error and explain why", func() { + success, err := (&MatchYAMLMatcher{YAMLToMatch: "good:\nbad"}).Match("") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("Expected 'good:\nbad' should be valid YAML")) + }) + }) + + Context("when the expected is neither a string nor a stringer nor a byte array", func() { + It("should error", func() { + success, err := (&MatchYAMLMatcher{YAMLToMatch: 2}).Match("") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + 
Expect(err.Error()).Should(ContainSubstring("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n : 2")) + + success, err = (&MatchYAMLMatcher{YAMLToMatch: nil}).Match("") + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n : nil")) + }) + }) + + Context("when the actual is neither a string nor a stringer nor a byte array", func() { + It("should error", func() { + success, err := (&MatchYAMLMatcher{YAMLToMatch: ""}).Match(2) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n : 2")) + + success, err = (&MatchYAMLMatcher{YAMLToMatch: ""}).Match(nil) + Expect(success).Should(BeFalse()) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n : nil")) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/matcher_tests_suite_test.go b/vendor/github.com/onsi/gomega/matchers/matcher_tests_suite_test.go new file mode 100644 index 00000000000..01b11b97d6e --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/matcher_tests_suite_test.go @@ -0,0 +1,30 @@ +package matchers_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +type myStringer struct { + a string +} + +func (s *myStringer) String() string { + return s.a +} + +type StringAlias string + +type myCustomType struct { + s string + n int + f float32 + arr []string +} + +func Test(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Gomega Matchers") +} diff --git a/vendor/github.com/onsi/gomega/matchers/not.go b/vendor/github.com/onsi/gomega/matchers/not.go new file mode 100644 index 00000000000..2c91670bd9b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/not.go @@ -0,0 +1,30 @@ +package matchers + +import ( + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type NotMatcher struct { + Matcher types.GomegaMatcher +} + +func (m *NotMatcher) Match(actual interface{}) (bool, error) { + success, err := m.Matcher.Match(actual) + if err != nil { + return false, err + } + return !success, nil +} + +func (m *NotMatcher) FailureMessage(actual interface{}) (message string) { + return m.Matcher.NegatedFailureMessage(actual) // works beautifully +} + +func (m *NotMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return m.Matcher.FailureMessage(actual) // works beautifully +} + +func (m *NotMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value +} diff --git a/vendor/github.com/onsi/gomega/matchers/not_test.go b/vendor/github.com/onsi/gomega/matchers/not_test.go new file mode 100644 index 00000000000..b3c1fdbf0b2 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/not_test.go @@ -0,0 +1,57 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("NotMatcher", func() { + Context("basic examples", func() { + It("works", func() { + Expect(input).To(Not(false1)) + Expect(input).To(Not(Not(true2))) + Expect(input).ToNot(Not(true3)) + Expect(input).ToNot(Not(Not(false1))) + Expect(input).To(Not(Not(Not(false2)))) + }) + }) + + Context("De Morgan's laws", func() { + It("~(A && B) == ~A || ~B", func() { + Expect(input).To(Not(And(false1, false2))) + Expect(input).To(Or(Not(false1), Not(false2))) + }) + It("~(A || B) == ~A && ~B", func() { + Expect(input).To(Not(Or(false1, false2))) + Expect(input).To(And(Not(false1), Not(false2))) + }) + }) + + Context("failure messages are opposite of original matchers' failure messages", func() { + Context("when match fails", func() { + It("gives a descriptive message", func() { + verifyFailureMessage(Not(HaveLen(2)), input, "not to have length 2") + }) + }) + + Context("when match succeeds, but expected it to fail", func() { + It("gives a descriptive message", func() { + verifyFailureMessage(Not(Not(HaveLen(3))), input, "to have length 3") + }) + }) + }) + + Context("MatchMayChangeInTheFuture()", func() { + It("Propagates value from wrapped matcher", func() { + m := Not(Or()) // an empty Or() always returns false, and indicates it cannot change + Expect(m.Match("anything")).To(BeTrue()) + Expect(m.(*NotMatcher).MatchMayChangeInTheFuture("anything")).To(BeFalse()) + }) + It("Defaults to true", func() { + m := Not(Equal(1)) // Equal does not have this method + Expect(m.Match(2)).To(BeTrue()) + Expect(m.(*NotMatcher).MatchMayChangeInTheFuture(2)).To(BeTrue()) // defaults to true + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/or.go b/vendor/github.com/onsi/gomega/matchers/or.go new file mode 100644 index 00000000000..3bf7998001d --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/or.go @@ -0,0 +1,67 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type OrMatcher struct { + Matchers []types.GomegaMatcher + + // state + firstSuccessfulMatcher types.GomegaMatcher +} + +func (m *OrMatcher) Match(actual interface{}) (success bool, err error) { + m.firstSuccessfulMatcher = nil + for _, matcher := range m.Matchers { + success, err := matcher.Match(actual) + if err != nil { + return false, err + } + if success { + m.firstSuccessfulMatcher = matcher + return true, nil + } + } + return false, nil +} + +func (m *OrMatcher) FailureMessage(actual interface{}) (message string) { + // not the most beautiful list of matchers, but not bad either... + return format.Message(actual, fmt.Sprintf("To satisfy at least one of these matchers: %s", m.Matchers)) +} + +func (m *OrMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return m.firstSuccessfulMatcher.NegatedFailureMessage(actual) +} + +func (m *OrMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + /* + Example with 3 matchers: A, B, C + + Match evaluates them: F, T, => T + So match is currently T, what should MatchMayChangeInTheFuture() return? + Seems like it only depends on B, since currently B MUST change to allow the result to become F + + Match eval: F, F, F => F + So match is currently F, what should MatchMayChangeInTheFuture() return? + Seems to depend on ANY of them being able to change to T. + */ + + if m.firstSuccessfulMatcher != nil { + // one of the matchers succeeded.. 
it must be able to change in order to affect the result + return oraclematcher.MatchMayChangeInTheFuture(m.firstSuccessfulMatcher, actual) + } else { + // so all matchers failed.. Any one of them changing would change the result. + for _, matcher := range m.Matchers { + if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) { + return true + } + } + return false // none of were going to change + } +} diff --git a/vendor/github.com/onsi/gomega/matchers/or_test.go b/vendor/github.com/onsi/gomega/matchers/or_test.go new file mode 100644 index 00000000000..9589a174da0 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/or_test.go @@ -0,0 +1,85 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("OrMatcher", func() { + It("works with positive cases", func() { + Expect(input).To(Or(true1)) + Expect(input).To(Or(true1, true2)) + Expect(input).To(Or(true1, false1)) + Expect(input).To(Or(false1, true2)) + Expect(input).To(Or(true1, true2, true3)) + Expect(input).To(Or(true1, true2, false3)) + Expect(input).To(Or(true1, false2, true3)) + Expect(input).To(Or(false1, true2, true3)) + Expect(input).To(Or(true1, false2, false3)) + Expect(input).To(Or(false1, false2, true3)) + + // use alias + Expect(input).To(SatisfyAny(false1, false2, true3)) + }) + + It("works with negative cases", func() { + Expect(input).ToNot(Or()) + Expect(input).ToNot(Or(false1)) + Expect(input).ToNot(Or(false1, false2)) + Expect(input).ToNot(Or(false1, false2, false3)) + }) + + Context("failure messages", func() { + Context("when match fails", func() { + It("gives a descriptive message", func() { + verifyFailureMessage(Or(false1, false2), input, + "To satisfy at least one of these matchers: [%!s(*matchers.HaveLenMatcher=&{1}) %!s(*matchers.EqualMatcher=&{hip})]") + }) + }) + + Context("when match succeeds, but expected it to fail", func() { + It("gives a descriptive message", func() { + verifyFailureMessage(Not(Or(true1, true2)), input, `not to have length 2`) + }) + }) + }) + + Context("MatchMayChangeInTheFuture", func() { + Context("Match returned false", func() { + It("returns true if any of the matchers could change", func() { + // 3 matchers, all return false, and all could change + m := Or(BeNil(), Equal("hip"), HaveLen(1)) + Expect(m.Match("hi")).To(BeFalse()) + Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("hi")).To(BeTrue()) // all 3 of these matchers default to 'true' + }) + It("returns false if none of the matchers could change", func() { + // empty Or() has the property of never matching, and never can change since there are no sub-matchers that could change + m := Or() + Expect(m.Match("anything")).To(BeFalse()) + Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("anything")).To(BeFalse()) + + // Or() with 3 sub-matchers that return false, and can't change + m = Or(Or(), Or(), Or()) + Expect(m.Match("hi")).To(BeFalse()) + Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("hi")).To(BeFalse()) // the 3 empty Or()'s won't change + }) + }) + Context("Match returned true", func() { + Context("returns value of the successful matcher", func() { + It("false if successful matcher not going to change", func() { + // 3 matchers: 1st returns false, 2nd returns true and is not going to change, 3rd is never called + m := Or(BeNil(), And(), Equal(1)) + Expect(m.Match("hi")).To(BeTrue()) + Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("hi")).To(BeFalse()) + }) + It("true if successful matcher indicates it might 
change", func() { + // 3 matchers: 1st returns false, 2nd returns true and "might" change, 3rd is never called + m := Or(Not(BeNil()), Equal("hi"), Equal(1)) + Expect(m.Match("hi")).To(BeTrue()) + Expect(m.(*OrMatcher).MatchMayChangeInTheFuture("hi")).To(BeTrue()) // Equal("hi") indicates it might change + }) + }) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go new file mode 100644 index 00000000000..75ab251bce9 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go @@ -0,0 +1,42 @@ +package matchers + +import ( + "fmt" + "github.com/onsi/gomega/format" + "reflect" +) + +type PanicMatcher struct{} + +func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil { + return false, fmt.Errorf("PanicMatcher expects a non-nil actual.") + } + + actualType := reflect.TypeOf(actual) + if actualType.Kind() != reflect.Func { + return false, fmt.Errorf("PanicMatcher expects a function. Got:\n%s", format.Object(actual, 1)) + } + if !(actualType.NumIn() == 0 && actualType.NumOut() == 0) { + return false, fmt.Errorf("PanicMatcher expects a function with no arguments and no return value. Got:\n%s", format.Object(actual, 1)) + } + + success = false + defer func() { + if e := recover(); e != nil { + success = true + } + }() + + reflect.ValueOf(actual).Call([]reflect.Value{}) + + return +} + +func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to panic") +} + +func (matcher *PanicMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to panic") +} diff --git a/vendor/github.com/onsi/gomega/matchers/panic_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/panic_matcher_test.go new file mode 100644 index 00000000000..17f3935e64b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/panic_matcher_test.go @@ -0,0 +1,36 @@ +package matchers_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/matchers" +) + +var _ = Describe("Panic", func() { + Context("when passed something that's not a function that takes zero arguments and returns nothing", func() { + It("should error", func() { + success, err := (&PanicMatcher{}).Match("foo") + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&PanicMatcher{}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&PanicMatcher{}).Match(func(foo string) {}) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&PanicMatcher{}).Match(func() string { return "bar" }) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when passed a function of the correct type", func() { + It("should call the function and pass if the function panics", func() { + Ω(func() { panic("ack!") }).Should(Panic()) + Ω(func() {}).ShouldNot(Panic()) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go new file mode 100644 index 00000000000..7a8c2cda519 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go @@ -0,0 +1,126 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type ReceiveMatcher struct { + Arg interface{} + receivedValue reflect.Value + channelClosed bool +} + +func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) { + if !isChan(actual) { + return false, fmt.Errorf("ReceiveMatcher expects a channel. Got:\n%s", format.Object(actual, 1)) + } + + channelType := reflect.TypeOf(actual) + channelValue := reflect.ValueOf(actual) + + if channelType.ChanDir() == reflect.SendDir { + return false, fmt.Errorf("ReceiveMatcher matcher cannot be passed a send-only channel. Got:\n%s", format.Object(actual, 1)) + } + + var subMatcher omegaMatcher + var hasSubMatcher bool + + if matcher.Arg != nil { + subMatcher, hasSubMatcher = (matcher.Arg).(omegaMatcher) + if !hasSubMatcher { + argType := reflect.TypeOf(matcher.Arg) + if argType.Kind() != reflect.Ptr { + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(matcher.Arg, 1)) + } + + assignable := channelType.Elem().AssignableTo(argType.Elem()) + if !assignable { + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(matcher.Arg, 1)) + } + } + } + + winnerIndex, value, open := reflect.Select([]reflect.SelectCase{ + reflect.SelectCase{Dir: reflect.SelectRecv, Chan: channelValue}, + reflect.SelectCase{Dir: reflect.SelectDefault}, + }) + + var closed bool + var didReceive bool + if winnerIndex == 0 { + closed = !open + didReceive = open + } + matcher.channelClosed = closed + + if closed { + return false, nil + } + + if hasSubMatcher { + if didReceive { + matcher.receivedValue = value + return subMatcher.Match(matcher.receivedValue.Interface()) + } else { + return false, nil + } + } + + if didReceive { + if matcher.Arg != nil { + outValue := reflect.ValueOf(matcher.Arg) + reflect.Indirect(outValue).Set(value) + } + + return true, nil + } else { + return false, nil + } +} + +func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) { + subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + + closedAddendum := "" + if matcher.channelClosed { + closedAddendum = " The channel is closed." 
+ } + + if hasSubMatcher { + if matcher.receivedValue.IsValid() { + return subMatcher.FailureMessage(matcher.receivedValue.Interface()) + } + return "When passed a matcher, ReceiveMatcher's channel *must* receive something." + } else { + return format.Message(actual, "to receive something."+closedAddendum) + } +} + +func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) { + subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + + closedAddendum := "" + if matcher.channelClosed { + closedAddendum = " The channel is closed." + } + + if hasSubMatcher { + if matcher.receivedValue.IsValid() { + return subMatcher.NegatedFailureMessage(matcher.receivedValue.Interface()) + } + return "When passed a matcher, ReceiveMatcher's channel *must* receive something." + } else { + return format.Message(actual, "not to receive anything."+closedAddendum) + } +} + +func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + if !isChan(actual) { + return false + } + + return !matcher.channelClosed +} diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher_test.go new file mode 100644 index 00000000000..938c078e6f7 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher_test.go @@ -0,0 +1,280 @@ +package matchers_test + +import ( + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +type kungFuActor interface { + DrunkenMaster() bool +} + +type jackie struct { + name string +} + +func (j *jackie) DrunkenMaster() bool { + return true +} + +var _ = Describe("ReceiveMatcher", func() { + Context("with no argument", func() { + Context("for a buffered channel", func() { + It("should succeed", func() { + channel := make(chan bool, 1) + + Ω(channel).ShouldNot(Receive()) + + channel <- true + + Ω(channel).Should(Receive()) + }) + }) + + Context("for an unbuffered channel", func() { + It("should succeed (eventually)", func() { + channel := make(chan bool) + + Ω(channel).ShouldNot(Receive()) + + go func() { + time.Sleep(10 * time.Millisecond) + channel <- true + }() + + Eventually(channel).Should(Receive()) + }) + }) + }) + + Context("with a pointer argument", func() { + Context("of the correct type", func() { + It("should write the value received on the channel to the pointer", func() { + channel := make(chan int, 1) + + var value int + + Ω(channel).ShouldNot(Receive(&value)) + Ω(value).Should(BeZero()) + + channel <- 17 + + Ω(channel).Should(Receive(&value)) + Ω(value).Should(Equal(17)) + }) + }) + + Context("to various types of objects", func() { + It("should work", func() { + //channels of strings + stringChan := make(chan string, 1) + stringChan <- "foo" + + var s string + Ω(stringChan).Should(Receive(&s)) + Ω(s).Should(Equal("foo")) + + //channels of slices + sliceChan := make(chan []bool, 1) + sliceChan <- []bool{true, true, false} + + var sl []bool + Ω(sliceChan).Should(Receive(&sl)) + Ω(sl).Should(Equal([]bool{true, true, false})) + + //channels of channels + chanChan := make(chan chan bool, 1) + c := make(chan bool) + chanChan <- c + + var receivedC chan bool + Ω(chanChan).Should(Receive(&receivedC)) + Ω(receivedC).Should(Equal(c)) + + //channels of interfaces + jackieChan := make(chan kungFuActor, 1) + aJackie := &jackie{name: "Jackie Chan"} + jackieChan <- aJackie + + var theJackie kungFuActor + Ω(jackieChan).Should(Receive(&theJackie)) + Ω(theJackie).Should(Equal(aJackie)) + }) + }) + + 
Context("of the wrong type", func() { + It("should error", func() { + channel := make(chan int) + var incorrectType bool + + success, err := (&ReceiveMatcher{Arg: &incorrectType}).Match(channel) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + var notAPointer int + success, err = (&ReceiveMatcher{Arg: notAPointer}).Match(channel) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + }) + + Context("with a matcher", func() { + It("should defer to the underlying matcher", func() { + intChannel := make(chan int, 1) + intChannel <- 3 + Ω(intChannel).Should(Receive(Equal(3))) + + intChannel <- 2 + Ω(intChannel).ShouldNot(Receive(Equal(3))) + + stringChannel := make(chan []string, 1) + stringChannel <- []string{"foo", "bar", "baz"} + Ω(stringChannel).Should(Receive(ContainElement(ContainSubstring("fo")))) + + stringChannel <- []string{"foo", "bar", "baz"} + Ω(stringChannel).ShouldNot(Receive(ContainElement(ContainSubstring("archipelago")))) + }) + + It("should defer to the underlying matcher for the message", func() { + matcher := Receive(Equal(3)) + channel := make(chan int, 1) + channel <- 2 + matcher.Match(channel) + Ω(matcher.FailureMessage(channel)).Should(MatchRegexp(`Expected\s+: 2\s+to equal\s+: 3`)) + + channel <- 3 + matcher.Match(channel) + Ω(matcher.NegatedFailureMessage(channel)).Should(MatchRegexp(`Expected\s+: 3\s+not to equal\s+: 3`)) + }) + + It("should work just fine with Eventually", func() { + stringChannel := make(chan string) + + go func() { + time.Sleep(5 * time.Millisecond) + stringChannel <- "A" + time.Sleep(5 * time.Millisecond) + stringChannel <- "B" + }() + + Eventually(stringChannel).Should(Receive(Equal("B"))) + }) + + Context("if the matcher errors", func() { + It("should error", func() { + channel := make(chan int, 1) + channel <- 3 + success, err := (&ReceiveMatcher{Arg: ContainSubstring("three")}).Match(channel) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("if nothing is received", func() { + It("should fail", func() { + channel := make(chan int, 1) + success, err := (&ReceiveMatcher{Arg: Equal(1)}).Match(channel) + Ω(success).Should(BeFalse()) + Ω(err).ShouldNot(HaveOccurred()) + }) + }) + }) + + Context("When actual is a *closed* channel", func() { + Context("for a buffered channel", func() { + It("should work until it hits the end of the buffer", func() { + channel := make(chan bool, 1) + channel <- true + + close(channel) + + Ω(channel).Should(Receive()) + Ω(channel).ShouldNot(Receive()) + }) + }) + + Context("for an unbuffered channel", func() { + It("should always fail", func() { + channel := make(chan bool) + close(channel) + + Ω(channel).ShouldNot(Receive()) + }) + }) + }) + + Context("When actual is a send-only channel", func() { + It("should error", func() { + channel := make(chan bool) + + var writerChannel chan<- bool + writerChannel = channel + + success, err := (&ReceiveMatcher{}).Match(writerChannel) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Context("when acutal is a non-channel", func() { + It("should error", func() { + var nilChannel chan bool + + success, err := (&ReceiveMatcher{}).Match(nilChannel) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&ReceiveMatcher{}).Match(nil) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + + success, err = (&ReceiveMatcher{}).Match(3) + Ω(success).Should(BeFalse()) + Ω(err).Should(HaveOccurred()) + }) + }) + + Describe("when used with eventually 
and a custom matcher", func() { + It("should return the matcher's error when a failing value is received on the channel, instead of the must receive something failure", func() { + failures := InterceptGomegaFailures(func() { + c := make(chan string, 0) + Eventually(c, 0.01).Should(Receive(Equal("hello"))) + }) + Ω(failures[0]).Should(ContainSubstring("When passed a matcher, ReceiveMatcher's channel *must* receive something.")) + + failures = InterceptGomegaFailures(func() { + c := make(chan string, 1) + c <- "hi" + Eventually(c, 0.01).Should(Receive(Equal("hello"))) + }) + Ω(failures[0]).Should(ContainSubstring(": hello")) + }) + }) + + Describe("Bailing early", func() { + It("should bail early when passed a closed channel", func() { + c := make(chan bool) + close(c) + + t := time.Now() + failures := InterceptGomegaFailures(func() { + Eventually(c).Should(Receive()) + }) + Ω(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond)) + Ω(failures).Should(HaveLen(1)) + }) + + It("should bail early when passed a non-channel", func() { + t := time.Now() + failures := InterceptGomegaFailures(func() { + Eventually(3).Should(Receive()) + }) + Ω(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond)) + Ω(failures).Should(HaveLen(1)) + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go new file mode 100644 index 00000000000..721ed5529bc --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go @@ -0,0 +1,33 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type SucceedMatcher struct { +} + +func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err error) { + // is purely nil? + if actual == nil { + return true, nil + } + + // must be an 'error' type + if !isError(actual) { + return false, fmt.Errorf("Expected an error-type. Got:\n%s", format.Object(actual, 1)) + } + + // must be nil (or a pointer to a nil) + return isNil(actual), nil +} + +func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected success, but got an error:\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1)) +} + +func (matcher *SucceedMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return "Expected failure, but got no error." +} diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher_test.go new file mode 100644 index 00000000000..6b62c8bb261 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/succeed_matcher_test.go @@ -0,0 +1,62 @@ +package matchers_test + +import ( + "errors" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +func Erroring() error { + return errors.New("bam") +} + +func NotErroring() error { + return nil +} + +type AnyType struct{} + +func Invalid() *AnyType { + return nil +} + +var _ = Describe("Succeed", func() { + It("should succeed if the function succeeds", func() { + Ω(NotErroring()).Should(Succeed()) + }) + + It("should succeed (in the negated) if the function errored", func() { + Ω(Erroring()).ShouldNot(Succeed()) + }) + + It("should not if passed a non-error", func() { + success, err := (&SucceedMatcher{}).Match(Invalid()) + Ω(success).Should(BeFalse()) + Ω(err).Should(MatchError("Expected an error-type. 
Got:\n <*matchers_test.AnyType | 0x0>: nil")) + }) + + It("doesn't support non-error type", func() { + success, err := (&SucceedMatcher{}).Match(AnyType{}) + Ω(success).Should(BeFalse()) + Ω(err).Should(MatchError("Expected an error-type. Got:\n : {}")) + }) + + It("doesn't support non-error pointer type", func() { + success, err := (&SucceedMatcher{}).Match(&AnyType{}) + Ω(success).Should(BeFalse()) + Ω(err).Should(MatchError(MatchRegexp(`Expected an error-type. Got:\n <*matchers_test.AnyType | 0x[[:xdigit:]]+>: {}`))) + }) + + It("should not succeed with pointer types that conform to error interface", func() { + err := &CustomErr{"ohai"} + Ω(err).ShouldNot(Succeed()) + }) + + It("should succeed with nil pointers to types that conform to error interface", func() { + var err *CustomErr = nil + Ω(err).Should(Succeed()) + }) + +}) diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go new file mode 100644 index 00000000000..119d21ef317 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go @@ -0,0 +1,41 @@ +package bipartitegraph + +import "errors" +import "fmt" + +import . "github.com/onsi/gomega/matchers/support/goraph/node" +import . "github.com/onsi/gomega/matchers/support/goraph/edge" + +type BipartiteGraph struct { + Left NodeOrderedSet + Right NodeOrderedSet + Edges EdgeSet +} + +func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) { + left := NodeOrderedSet{} + for i, _ := range leftValues { + left = append(left, Node{i}) + } + + right := NodeOrderedSet{} + for j, _ := range rightValues { + right = append(right, Node{j + len(left)}) + } + + edges := EdgeSet{} + for i, leftValue := range leftValues { + for j, rightValue := range rightValues { + neighbours, err := neighbours(leftValue, rightValue) + if err != nil { + return nil, errors.New(fmt.Sprintf("error determining adjacency for %v and %v: %s", leftValue, rightValue, err.Error())) + } + + if neighbours { + edges = append(edges, Edge{left[i], right[j]}) + } + } + } + + return &BipartiteGraph{left, right, edges}, nil +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go new file mode 100644 index 00000000000..32529c51131 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go @@ -0,0 +1,161 @@ +package bipartitegraph + +import . "github.com/onsi/gomega/matchers/support/goraph/node" +import . 
"github.com/onsi/gomega/matchers/support/goraph/edge" +import "github.com/onsi/gomega/matchers/support/goraph/util" + +func (bg *BipartiteGraph) LargestMatching() (matching EdgeSet) { + paths := bg.maximalDisjointSLAPCollection(matching) + + for len(paths) > 0 { + for _, path := range paths { + matching = matching.SymmetricDifference(path) + } + paths = bg.maximalDisjointSLAPCollection(matching) + } + + return +} + +func (bg *BipartiteGraph) maximalDisjointSLAPCollection(matching EdgeSet) (result []EdgeSet) { + guideLayers := bg.createSLAPGuideLayers(matching) + if len(guideLayers) == 0 { + return + } + + used := make(map[Node]bool) + + for _, u := range guideLayers[len(guideLayers)-1] { + slap, found := bg.findDisjointSLAP(u, matching, guideLayers, used) + if found { + for _, edge := range slap { + used[edge.Node1] = true + used[edge.Node2] = true + } + result = append(result, slap) + } + } + + return +} + +func (bg *BipartiteGraph) findDisjointSLAP( + start Node, + matching EdgeSet, + guideLayers []NodeOrderedSet, + used map[Node]bool, +) ([]Edge, bool) { + return bg.findDisjointSLAPHelper(start, EdgeSet{}, len(guideLayers)-1, matching, guideLayers, used) +} + +func (bg *BipartiteGraph) findDisjointSLAPHelper( + currentNode Node, + currentSLAP EdgeSet, + currentLevel int, + matching EdgeSet, + guideLayers []NodeOrderedSet, + used map[Node]bool, +) (EdgeSet, bool) { + used[currentNode] = true + + if currentLevel == 0 { + return currentSLAP, true + } + + for _, nextNode := range guideLayers[currentLevel-1] { + if used[nextNode] { + continue + } + + edge, found := bg.Edges.FindByNodes(currentNode, nextNode) + if !found { + continue + } + + if matching.Contains(edge) == util.Odd(currentLevel) { + continue + } + + currentSLAP = append(currentSLAP, edge) + slap, found := bg.findDisjointSLAPHelper(nextNode, currentSLAP, currentLevel-1, matching, guideLayers, used) + if found { + return slap, true + } + currentSLAP = currentSLAP[:len(currentSLAP)-1] + } + + used[currentNode] = false + return nil, false +} + +func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers []NodeOrderedSet) { + used := make(map[Node]bool) + currentLayer := NodeOrderedSet{} + + for _, node := range bg.Left { + if matching.Free(node) { + used[node] = true + currentLayer = append(currentLayer, node) + } + } + + if len(currentLayer) == 0 { + return []NodeOrderedSet{} + } else { + guideLayers = append(guideLayers, currentLayer) + } + + done := false + + for !done { + lastLayer := currentLayer + currentLayer = NodeOrderedSet{} + + if util.Odd(len(guideLayers)) { + for _, leftNode := range lastLayer { + for _, rightNode := range bg.Right { + if used[rightNode] { + continue + } + + edge, found := bg.Edges.FindByNodes(leftNode, rightNode) + if !found || matching.Contains(edge) { + continue + } + + currentLayer = append(currentLayer, rightNode) + used[rightNode] = true + + if matching.Free(rightNode) { + done = true + } + } + } + } else { + for _, rightNode := range lastLayer { + for _, leftNode := range bg.Left { + if used[leftNode] { + continue + } + + edge, found := bg.Edges.FindByNodes(leftNode, rightNode) + if !found || !matching.Contains(edge) { + continue + } + + currentLayer = append(currentLayer, leftNode) + used[leftNode] = true + } + } + + } + + if len(currentLayer) == 0 { + return []NodeOrderedSet{} + } else { + guideLayers = append(guideLayers, currentLayer) + } + } + + return +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go 
b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go new file mode 100644 index 00000000000..4fd15cc0694 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go @@ -0,0 +1,61 @@ +package edge + +import . "github.com/onsi/gomega/matchers/support/goraph/node" + +type Edge struct { + Node1 Node + Node2 Node +} + +type EdgeSet []Edge + +func (ec EdgeSet) Free(node Node) bool { + for _, e := range ec { + if e.Node1 == node || e.Node2 == node { + return false + } + } + + return true +} + +func (ec EdgeSet) Contains(edge Edge) bool { + for _, e := range ec { + if e == edge { + return true + } + } + + return false +} + +func (ec EdgeSet) FindByNodes(node1, node2 Node) (Edge, bool) { + for _, e := range ec { + if (e.Node1 == node1 && e.Node2 == node2) || (e.Node1 == node2 && e.Node2 == node1) { + return e, true + } + } + + return Edge{}, false +} + +func (ec EdgeSet) SymmetricDifference(ec2 EdgeSet) EdgeSet { + edgesToInclude := make(map[Edge]bool) + + for _, e := range ec { + edgesToInclude[e] = true + } + + for _, e := range ec2 { + edgesToInclude[e] = !edgesToInclude[e] + } + + result := EdgeSet{} + for e, include := range edgesToInclude { + if include { + result = append(result, e) + } + } + + return result +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go new file mode 100644 index 00000000000..800c2ea8caf --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go @@ -0,0 +1,7 @@ +package node + +type Node struct { + Id int +} + +type NodeOrderedSet []Node diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go new file mode 100644 index 00000000000..d76a1ee00a2 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go @@ -0,0 +1,7 @@ +package util + +import "math" + +func Odd(n int) bool { + return math.Mod(float64(n), 2.0) == 1.0 +} diff --git a/vendor/github.com/onsi/gomega/matchers/type_support.go b/vendor/github.com/onsi/gomega/matchers/type_support.go new file mode 100644 index 00000000000..04020f004e6 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/type_support.go @@ -0,0 +1,176 @@ +/* +Gomega matchers + +This package implements the Gomega matchers and does not typically need to be imported. 
+See the docs for Gomega for documentation on the matchers + +http://onsi.github.io/gomega/ +*/ +package matchers + +import ( + "fmt" + "reflect" +) + +type omegaMatcher interface { + Match(actual interface{}) (success bool, err error) + FailureMessage(actual interface{}) (message string) + NegatedFailureMessage(actual interface{}) (message string) +} + +func isBool(a interface{}) bool { + return reflect.TypeOf(a).Kind() == reflect.Bool +} + +func isNumber(a interface{}) bool { + if a == nil { + return false + } + kind := reflect.TypeOf(a).Kind() + return reflect.Int <= kind && kind <= reflect.Float64 +} + +func isInteger(a interface{}) bool { + kind := reflect.TypeOf(a).Kind() + return reflect.Int <= kind && kind <= reflect.Int64 +} + +func isUnsignedInteger(a interface{}) bool { + kind := reflect.TypeOf(a).Kind() + return reflect.Uint <= kind && kind <= reflect.Uint64 +} + +func isFloat(a interface{}) bool { + kind := reflect.TypeOf(a).Kind() + return reflect.Float32 <= kind && kind <= reflect.Float64 +} + +func toInteger(a interface{}) int64 { + if isInteger(a) { + return reflect.ValueOf(a).Int() + } else if isUnsignedInteger(a) { + return int64(reflect.ValueOf(a).Uint()) + } else if isFloat(a) { + return int64(reflect.ValueOf(a).Float()) + } else { + panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) + } +} + +func toUnsignedInteger(a interface{}) uint64 { + if isInteger(a) { + return uint64(reflect.ValueOf(a).Int()) + } else if isUnsignedInteger(a) { + return reflect.ValueOf(a).Uint() + } else if isFloat(a) { + return uint64(reflect.ValueOf(a).Float()) + } else { + panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) + } +} + +func toFloat(a interface{}) float64 { + if isInteger(a) { + return float64(reflect.ValueOf(a).Int()) + } else if isUnsignedInteger(a) { + return float64(reflect.ValueOf(a).Uint()) + } else if isFloat(a) { + return reflect.ValueOf(a).Float() + } else { + panic(fmt.Sprintf("Expected a number! 
Got <%T> %#v", a, a)) + } +} + +func isError(a interface{}) bool { + _, ok := a.(error) + return ok +} + +func isChan(a interface{}) bool { + if isNil(a) { + return false + } + return reflect.TypeOf(a).Kind() == reflect.Chan +} + +func isMap(a interface{}) bool { + if a == nil { + return false + } + return reflect.TypeOf(a).Kind() == reflect.Map +} + +func isArrayOrSlice(a interface{}) bool { + if a == nil { + return false + } + switch reflect.TypeOf(a).Kind() { + case reflect.Array, reflect.Slice: + return true + default: + return false + } +} + +func isString(a interface{}) bool { + if a == nil { + return false + } + return reflect.TypeOf(a).Kind() == reflect.String +} + +func toString(a interface{}) (string, bool) { + aString, isString := a.(string) + if isString { + return aString, true + } + + aBytes, isBytes := a.([]byte) + if isBytes { + return string(aBytes), true + } + + aStringer, isStringer := a.(fmt.Stringer) + if isStringer { + return aStringer.String(), true + } + + return "", false +} + +func lengthOf(a interface{}) (int, bool) { + if a == nil { + return 0, false + } + switch reflect.TypeOf(a).Kind() { + case reflect.Map, reflect.Array, reflect.String, reflect.Chan, reflect.Slice: + return reflect.ValueOf(a).Len(), true + default: + return 0, false + } +} +func capOf(a interface{}) (int, bool) { + if a == nil { + return 0, false + } + switch reflect.TypeOf(a).Kind() { + case reflect.Array, reflect.Chan, reflect.Slice: + return reflect.ValueOf(a).Cap(), true + default: + return 0, false + } +} + +func isNil(a interface{}) bool { + if a == nil { + return true + } + + switch reflect.TypeOf(a).Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return reflect.ValueOf(a).IsNil() + } + + return false +} diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform.go b/vendor/github.com/onsi/gomega/matchers/with_transform.go new file mode 100644 index 00000000000..8e58d8a0fb7 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/with_transform.go @@ -0,0 +1,72 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type WithTransformMatcher struct { + // input + Transform interface{} // must be a function of one parameter that returns one value + Matcher types.GomegaMatcher + + // cached value + transformArgType reflect.Type + + // state + transformedValue interface{} +} + +func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher { + if transform == nil { + panic("transform function cannot be nil") + } + txType := reflect.TypeOf(transform) + if txType.NumIn() != 1 { + panic("transform function must have 1 argument") + } + if txType.NumOut() != 1 { + panic("transform function must have 1 return value") + } + + return &WithTransformMatcher{ + Transform: transform, + Matcher: matcher, + transformArgType: reflect.TypeOf(transform).In(0), + } +} + +func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) { + // return error if actual's type is incompatible with Transform function's argument type + actualType := reflect.TypeOf(actual) + if !actualType.AssignableTo(m.transformArgType) { + return false, fmt.Errorf("Transform function expects '%s' but we have '%s'", m.transformArgType, actualType) + } + + // call the Transform function with `actual` + fn := reflect.ValueOf(m.Transform) + result := fn.Call([]reflect.Value{reflect.ValueOf(actual)}) + m.transformedValue = 
result[0].Interface() // expect exactly one value + + return m.Matcher.Match(m.transformedValue) +} + +func (m *WithTransformMatcher) FailureMessage(_ interface{}) (message string) { + return m.Matcher.FailureMessage(m.transformedValue) +} + +func (m *WithTransformMatcher) NegatedFailureMessage(_ interface{}) (message string) { + return m.Matcher.NegatedFailureMessage(m.transformedValue) +} + +func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ interface{}) bool { + // TODO: Maybe this should always just return true? (Only an issue for non-deterministic transformers.) + // + // Querying the next matcher is fine if the transformer always will return the same value. + // But if the transformer is non-deterministic and returns a different value each time, then there + // is no point in querying the next matcher, since it can only comment on the last transformed value. + return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, m.transformedValue) +} diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform_test.go b/vendor/github.com/onsi/gomega/matchers/with_transform_test.go new file mode 100644 index 00000000000..e52bf8e631a --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/with_transform_test.go @@ -0,0 +1,102 @@ +package matchers_test + +import ( + "errors" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/matchers" +) + +var _ = Describe("WithTransformMatcher", func() { + + var plus1 = func(i int) int { return i + 1 } + +	Context("Panic if transform function invalid", func() { + panicsWithTransformer := func(transform interface{}) { + ExpectWithOffset(1, func() { WithTransform(transform, nil) }).To(Panic()) + } + It("nil", func() { + panicsWithTransformer(nil) + }) + Context("Invalid number of args, but correct return value count", func() { + It("zero", func() { + panicsWithTransformer(func() int { return 5 }) + }) + It("two", func() { + panicsWithTransformer(func(i, j int) int { return 5 }) + }) + }) + Context("Invalid number of return values, but correct number of arguments", func() { + It("zero", func() { + panicsWithTransformer(func(i int) {}) + }) + It("two", func() { + panicsWithTransformer(func(i int) (int, int) { return 5, 6 }) + }) + }) + }) + + It("works with positive cases", func() { + Expect(1).To(WithTransform(plus1, Equal(2))) + Expect(1).To(WithTransform(plus1, WithTransform(plus1, Equal(3)))) + Expect(1).To(WithTransform(plus1, And(Equal(2), BeNumerically(">", 1)))) + + // transform expects custom type + type S struct { + A int + B string + } + transformer := func(s S) string { return s.B } + Expect(S{1, "hi"}).To(WithTransform(transformer, Equal("hi"))) + + // transform expects interface + errString := func(e error) string { return e.Error() } + Expect(errors.New("abc")).To(WithTransform(errString, Equal("abc"))) + }) + + It("works with negative cases", func() { + Expect(1).ToNot(WithTransform(plus1, Equal(3))) + Expect(1).ToNot(WithTransform(plus1, WithTransform(plus1, Equal(2)))) + }) + + Context("failure messages", func() { + Context("when match fails", func() { + It("gives a descriptive message", func() { + m := WithTransform(plus1, Equal(3)) + Expect(m.Match(1)).To(BeFalse()) + Expect(m.FailureMessage(1)).To(Equal("Expected\n <int>: 2\nto equal\n <int>: 3")) + }) + }) + + Context("when match succeeds, but expected it to fail", func() { + It("gives a descriptive message", func() { + m := Not(WithTransform(plus1, Equal(3))) + Expect(m.Match(2)).To(BeFalse()) + Expect(m.FailureMessage(2)).To(Equal("Expected\n <int>: 
3\nnot to equal\n : 3")) + }) + }) + + Context("actual value is incompatible with transform function's argument type", func() { + It("gracefully fails if transform cannot be performed", func() { + m := WithTransform(plus1, Equal(3)) + result, err := m.Match("hi") // give it a string but transform expects int; doesn't panic + Expect(result).To(BeFalse()) + Expect(err).To(MatchError("Transform function expects 'int' but we have 'string'")) + }) + }) + }) + + Context("MatchMayChangeInTheFuture()", func() { + It("Propagates value from wrapped matcher on the transformed value", func() { + m := WithTransform(plus1, Or()) // empty Or() always returns false, and indicates it cannot change + Expect(m.Match(1)).To(BeFalse()) + Expect(m.(*WithTransformMatcher).MatchMayChangeInTheFuture(1)).To(BeFalse()) // empty Or() indicates cannot change + }) + It("Defaults to true", func() { + m := WithTransform(plus1, Equal(2)) // Equal does not have this method + Expect(m.Match(1)).To(BeTrue()) + Expect(m.(*WithTransformMatcher).MatchMayChangeInTheFuture(1)).To(BeTrue()) // defaults to true + }) + }) +}) diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go new file mode 100644 index 00000000000..1c632ade291 --- /dev/null +++ b/vendor/github.com/onsi/gomega/types/types.go @@ -0,0 +1,17 @@ +package types + +type GomegaFailHandler func(message string, callerSkip ...int) + +//A simple *testing.T interface wrapper +type GomegaTestingT interface { + Errorf(format string, args ...interface{}) +} + +//All Gomega matchers must implement the GomegaMatcher interface +// +//For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding_your_own_matchers +type GomegaMatcher interface { + Match(actual interface{}) (success bool, err error) + FailureMessage(actual interface{}) (message string) + NegatedFailureMessage(actual interface{}) (message string) +} diff --git a/vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-api.json b/vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-api.json deleted file mode 100644 index de89983988b..00000000000 --- a/vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-api.json +++ /dev/null @@ -1,839 +0,0 @@ -{ - "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/mLggzdz9EvP0lVEQoxoRvdSJShg\"", - "discoveryVersion": "v1", - "id": "cloudmonitoring:v2beta2", - "name": "cloudmonitoring", - "canonicalName": "Cloud Monitoring", - "version": "v2beta2", - "revision": "20161031", - "title": "Cloud Monitoring API", - "description": "Accesses Google Cloud Monitoring data.", - "ownerDomain": "google.com", - "ownerName": "Google", - "icons": { - "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", - "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" - }, - "documentationLink": "https://cloud.google.com/monitoring/v2beta2/", - "protocol": "rest", - "baseUrl": "https://www.googleapis.com/cloudmonitoring/v2beta2/projects/", - "basePath": "/cloudmonitoring/v2beta2/projects/", - "rootUrl": "https://www.googleapis.com/", - "servicePath": "cloudmonitoring/v2beta2/projects/", - "batchPath": "batch", - "parameters": { - "alt": { - "type": "string", - "description": "Data format for the response.", - "default": "json", - "enum": [ - "json" - ], - "enumDescriptions": [ - "Responses with Content-Type of application/json" - ], - "location": "query" - }, - "fields": { - "type": "string", - "description": "Selector 
specifying which fields to include in a partial response.", - "location": "query" - }, - "key": { - "type": "string", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "location": "query" - }, - "oauth_token": { - "type": "string", - "description": "OAuth 2.0 token for the current user.", - "location": "query" - }, - "prettyPrint": { - "type": "boolean", - "description": "Returns response with indentations and line breaks.", - "default": "true", - "location": "query" - }, - "quotaUser": { - "type": "string", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", - "location": "query" - }, - "userIp": { - "type": "string", - "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.", - "location": "query" - } - }, - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/monitoring": { - "description": "View and write monitoring data for all of your Google and third-party Cloud and API projects" - } - } - } - }, - "schemas": { - "DeleteMetricDescriptorResponse": { - "id": "DeleteMetricDescriptorResponse", - "type": "object", - "description": "The response of cloudmonitoring.metricDescriptors.delete.", - "properties": { - "kind": { - "type": "string", - "description": "Identifies what kind of resource this is. Value: the fixed string \"cloudmonitoring#deleteMetricDescriptorResponse\".", - "default": "cloudmonitoring#deleteMetricDescriptorResponse" - } - } - }, - "ListMetricDescriptorsRequest": { - "id": "ListMetricDescriptorsRequest", - "type": "object", - "description": "The request of cloudmonitoring.metricDescriptors.list.", - "properties": { - "kind": { - "type": "string", - "description": "Identifies what kind of resource this is. Value: the fixed string \"cloudmonitoring#listMetricDescriptorsRequest\".", - "default": "cloudmonitoring#listMetricDescriptorsRequest" - } - } - }, - "ListMetricDescriptorsResponse": { - "id": "ListMetricDescriptorsResponse", - "type": "object", - "description": "The response of cloudmonitoring.metricDescriptors.list.", - "properties": { - "kind": { - "type": "string", - "description": "Identifies what kind of resource this is. Value: the fixed string \"cloudmonitoring#listMetricDescriptorsResponse\".", - "default": "cloudmonitoring#listMetricDescriptorsResponse" - }, - "metrics": { - "type": "array", - "description": "The returned metric descriptors.", - "items": { - "$ref": "MetricDescriptor" - } - }, - "nextPageToken": { - "type": "string", - "description": "Pagination token. If present, indicates that additional results are available for retrieval. To access the results past the pagination limit, pass this value to the pageToken query parameter." - } - } - }, - "ListTimeseriesDescriptorsRequest": { - "id": "ListTimeseriesDescriptorsRequest", - "type": "object", - "description": "The request of cloudmonitoring.timeseriesDescriptors.list", - "properties": { - "kind": { - "type": "string", - "description": "Identifies what kind of resource this is. 
Value: the fixed string \"cloudmonitoring#listTimeseriesDescriptorsRequest\".", - "default": "cloudmonitoring#listTimeseriesDescriptorsRequest" - } - } - }, - "ListTimeseriesDescriptorsResponse": { - "id": "ListTimeseriesDescriptorsResponse", - "type": "object", - "description": "The response of cloudmonitoring.timeseriesDescriptors.list", - "properties": { - "kind": { - "type": "string", - "description": "Identifies what kind of resource this is. Value: the fixed string \"cloudmonitoring#listTimeseriesDescriptorsResponse\".", - "default": "cloudmonitoring#listTimeseriesDescriptorsResponse" - }, - "nextPageToken": { - "type": "string", - "description": "Pagination token. If present, indicates that additional results are available for retrieval. To access the results past the pagination limit, set this value to the pageToken query parameter." - }, - "oldest": { - "type": "string", - "description": "The oldest timestamp of the interval of this query, as an RFC 3339 string.", - "format": "date-time" - }, - "timeseries": { - "type": "array", - "description": "The returned time series descriptors.", - "items": { - "$ref": "TimeseriesDescriptor" - } - }, - "youngest": { - "type": "string", - "description": "The youngest timestamp of the interval of this query, as an RFC 3339 string.", - "format": "date-time" - } - } - }, - "ListTimeseriesRequest": { - "id": "ListTimeseriesRequest", - "type": "object", - "description": "The request of cloudmonitoring.timeseries.list", - "properties": { - "kind": { - "type": "string", - "description": "Identifies what kind of resource this is. Value: the fixed string \"cloudmonitoring#listTimeseriesRequest\".", - "default": "cloudmonitoring#listTimeseriesRequest" - } - } - }, - "ListTimeseriesResponse": { - "id": "ListTimeseriesResponse", - "type": "object", - "description": "The response of cloudmonitoring.timeseries.list", - "properties": { - "kind": { - "type": "string", - "description": "Identifies what kind of resource this is. Value: the fixed string \"cloudmonitoring#listTimeseriesResponse\".", - "default": "cloudmonitoring#listTimeseriesResponse" - }, - "nextPageToken": { - "type": "string", - "description": "Pagination token. If present, indicates that additional results are available for retrieval. To access the results past the pagination limit, set the pageToken query parameter to this value. All of the points of a time series will be returned before returning any point of the subsequent time series." - }, - "oldest": { - "type": "string", - "description": "The oldest timestamp of the interval of this query as an RFC 3339 string.", - "format": "date-time" - }, - "timeseries": { - "type": "array", - "description": "The returned time series.", - "items": { - "$ref": "Timeseries" - } - }, - "youngest": { - "type": "string", - "description": "The youngest timestamp of the interval of this query as an RFC 3339 string.", - "format": "date-time" - } - } - }, - "MetricDescriptor": { - "id": "MetricDescriptor", - "type": "object", - "description": "A metricDescriptor defines the name, label keys, and data type of a particular metric.", - "properties": { - "description": { - "type": "string", - "description": "Description of this metric." - }, - "labels": { - "type": "array", - "description": "Labels defined for this metric.", - "items": { - "$ref": "MetricDescriptorLabelDescriptor" - } - }, - "name": { - "type": "string", - "description": "The name of this metric." 
- }, - "project": { - "type": "string", - "description": "The project ID to which the metric belongs." - }, - "typeDescriptor": { - "$ref": "MetricDescriptorTypeDescriptor", - "description": "Type description for this metric." - } - } - }, - "MetricDescriptorLabelDescriptor": { - "id": "MetricDescriptorLabelDescriptor", - "type": "object", - "description": "A label in a metric is a description of this metric, including the key of this description (what the description is), and the value for this description.", - "properties": { - "description": { - "type": "string", - "description": "Label description." - }, - "key": { - "type": "string", - "description": "Label key." - } - } - }, - "MetricDescriptorTypeDescriptor": { - "id": "MetricDescriptorTypeDescriptor", - "type": "object", - "description": "A type in a metric contains information about how the metric is collected and what its data points look like.", - "properties": { - "metricType": { - "type": "string", - "description": "The method of collecting data for the metric. See Metric types." - }, - "valueType": { - "type": "string", - "description": "The data type of of individual points in the metric's time series. See Metric value types." - } - } - }, - "Point": { - "id": "Point", - "type": "object", - "description": "Point is a single point in a time series. It consists of a start time, an end time, and a value.", - "properties": { - "boolValue": { - "type": "boolean", - "description": "The value of this data point. Either \"true\" or \"false\"." - }, - "distributionValue": { - "$ref": "PointDistribution", - "description": "The value of this data point as a distribution. A distribution value can contain a list of buckets and/or an underflowBucket and an overflowBucket. The values of these points can be used to create a histogram." - }, - "doubleValue": { - "type": "number", - "description": "The value of this data point as a double-precision floating-point number.", - "format": "double" - }, - "end": { - "type": "string", - "description": "The interval [start, end] is the time period to which the point's value applies. For gauge metrics, whose values are instantaneous measurements, this interval should be empty (start should equal end). For cumulative metrics (of which deltas and rates are special cases), the interval should be non-empty. Both start and end are RFC 3339 strings.", - "format": "date-time" - }, - "int64Value": { - "type": "string", - "description": "The value of this data point as a 64-bit integer.", - "format": "int64" - }, - "start": { - "type": "string", - "description": "The interval [start, end] is the time period to which the point's value applies. For gauge metrics, whose values are instantaneous measurements, this interval should be empty (start should equal end). For cumulative metrics (of which deltas and rates are special cases), the interval should be non-empty. Both start and end are RFC 3339 strings.", - "format": "date-time" - }, - "stringValue": { - "type": "string", - "description": "The value of this data point in string format." - } - } - }, - "PointDistribution": { - "id": "PointDistribution", - "type": "object", - "description": "Distribution data point value type. When writing distribution points, try to be consistent with the boundaries of your buckets. 
If you must modify the bucket boundaries, then do so by merging, partitioning, or appending rather than skewing them.", - "properties": { - "buckets": { - "type": "array", - "description": "The finite buckets.", - "items": { - "$ref": "PointDistributionBucket" - } - }, - "overflowBucket": { - "$ref": "PointDistributionOverflowBucket", - "description": "The overflow bucket." - }, - "underflowBucket": { - "$ref": "PointDistributionUnderflowBucket", - "description": "The underflow bucket." - } - } - }, - "PointDistributionBucket": { - "id": "PointDistributionBucket", - "type": "object", - "description": "The histogram's bucket. Buckets that form the histogram of a distribution value. If the upper bound of a bucket, say U1, does not equal the lower bound of the next bucket, say L2, this means that there is no event in [U1, L2).", - "properties": { - "count": { - "type": "string", - "description": "The number of events whose values are in the interval defined by this bucket.", - "format": "int64" - }, - "lowerBound": { - "type": "number", - "description": "The lower bound of the value interval of this bucket (inclusive).", - "format": "double" - }, - "upperBound": { - "type": "number", - "description": "The upper bound of the value interval of this bucket (exclusive).", - "format": "double" - } - } - }, - "PointDistributionOverflowBucket": { - "id": "PointDistributionOverflowBucket", - "type": "object", - "description": "The overflow bucket is a special bucket that does not have the upperBound field; it includes all of the events that are no less than its lower bound.", - "properties": { - "count": { - "type": "string", - "description": "The number of events whose values are in the interval defined by this bucket.", - "format": "int64" - }, - "lowerBound": { - "type": "number", - "description": "The lower bound of the value interval of this bucket (inclusive).", - "format": "double" - } - } - }, - "PointDistributionUnderflowBucket": { - "id": "PointDistributionUnderflowBucket", - "type": "object", - "description": "The underflow bucket is a special bucket that does not have the lowerBound field; it includes all of the events that are less than its upper bound.", - "properties": { - "count": { - "type": "string", - "description": "The number of events whose values are in the interval defined by this bucket.", - "format": "int64" - }, - "upperBound": { - "type": "number", - "description": "The upper bound of the value interval of this bucket (exclusive).", - "format": "double" - } - } - }, - "Timeseries": { - "id": "Timeseries", - "type": "object", - "description": "The monitoring data is organized as metrics and stored as data points that are recorded over time. Each data point represents information like the CPU utilization of your virtual machine. A historical record of these data points is called a time series.", - "properties": { - "points": { - "type": "array", - "description": "The data points of this time series. The points are listed in order of their end timestamp, from younger to older.", - "items": { - "$ref": "Point" - } - }, - "timeseriesDesc": { - "$ref": "TimeseriesDescriptor", - "description": "The descriptor of this time series." - } - } - }, - "TimeseriesDescriptor": { - "id": "TimeseriesDescriptor", - "type": "object", - "description": "TimeseriesDescriptor identifies a single time series.", - "properties": { - "labels": { - "type": "object", - "description": "The label's name.", - "additionalProperties": { - "type": "string", - "description": "The label's name." 
- } - }, - "metric": { - "type": "string", - "description": "The name of the metric." - }, - "project": { - "type": "string", - "description": "The Developers Console project number to which this time series belongs." - } - } - }, - "TimeseriesDescriptorLabel": { - "id": "TimeseriesDescriptorLabel", - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "The label's name." - }, - "value": { - "type": "string", - "description": "The label's value." - } - } - }, - "TimeseriesPoint": { - "id": "TimeseriesPoint", - "type": "object", - "description": "When writing time series, TimeseriesPoint should be used instead of Timeseries, to enforce single point for each time series in the timeseries.write request.", - "properties": { - "point": { - "$ref": "Point", - "description": "The data point in this time series snapshot." - }, - "timeseriesDesc": { - "$ref": "TimeseriesDescriptor", - "description": "The descriptor of this time series." - } - } - }, - "WriteTimeseriesRequest": { - "id": "WriteTimeseriesRequest", - "type": "object", - "description": "The request of cloudmonitoring.timeseries.write", - "properties": { - "commonLabels": { - "type": "object", - "description": "The label's name.", - "additionalProperties": { - "type": "string", - "description": "The label's name." - } - }, - "timeseries": { - "type": "array", - "description": "Provide time series specific labels and the data points for each time series. The labels in timeseries and the common_labels should form a complete list of labels that required by the metric.", - "items": { - "$ref": "TimeseriesPoint" - } - } - } - }, - "WriteTimeseriesResponse": { - "id": "WriteTimeseriesResponse", - "type": "object", - "description": "The response of cloudmonitoring.timeseries.write", - "properties": { - "kind": { - "type": "string", - "description": "Identifies what kind of resource this is. Value: the fixed string \"cloudmonitoring#writeTimeseriesResponse\".", - "default": "cloudmonitoring#writeTimeseriesResponse" - } - } - } - }, - "resources": { - "metricDescriptors": { - "methods": { - "create": { - "id": "cloudmonitoring.metricDescriptors.create", - "path": "{project}/metricDescriptors", - "httpMethod": "POST", - "description": "Create a new metric.", - "parameters": { - "project": { - "type": "string", - "description": "The project id. 
The value can be the numeric project ID or string-based project name.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "MetricDescriptor" - }, - "response": { - "$ref": "MetricDescriptor" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ] - }, - "delete": { - "id": "cloudmonitoring.metricDescriptors.delete", - "path": "{project}/metricDescriptors/{metric}", - "httpMethod": "DELETE", - "description": "Delete an existing metric.", - "parameters": { - "metric": { - "type": "string", - "description": "Name of the metric.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "The project ID to which the metric belongs.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "metric" - ], - "response": { - "$ref": "DeleteMetricDescriptorResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ] - }, - "list": { - "id": "cloudmonitoring.metricDescriptors.list", - "path": "{project}/metricDescriptors", - "httpMethod": "GET", - "description": "List metric descriptors that match the query. If the query is not set, then all of the metric descriptors will be returned. Large responses will be paginated, use the nextPageToken returned in the response to request subsequent pages of results by setting the pageToken query parameter to the value of the nextPageToken.", - "parameters": { - "count": { - "type": "integer", - "description": "Maximum number of metric descriptors per page. Used for pagination. If not specified, count = 100.", - "default": "100", - "format": "int32", - "minimum": "1", - "maximum": "1000", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "The pagination token, which is used to page through large result sets. Set this value to the value of the nextPageToken to retrieve the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "The project id. The value can be the numeric project ID or string-based project name.", - "required": true, - "location": "path" - }, - "query": { - "type": "string", - "description": "The query used to search against existing metrics. Separate keywords with a space; the service joins all keywords with AND, meaning that all keywords must match for a metric to be returned. If this field is omitted, all metrics are returned. If an empty string is passed with this field, no metrics are returned.", - "location": "query" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "ListMetricDescriptorsRequest" - }, - "response": { - "$ref": "ListMetricDescriptorsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ] - } - } - }, - "timeseries": { - "methods": { - "list": { - "id": "cloudmonitoring.timeseries.list", - "path": "{project}/timeseries/{metric}", - "httpMethod": "GET", - "description": "List the data points of the time series that match the metric and labels values and that have data points in the interval. 
Large responses are paginated; use the nextPageToken returned in the response to request subsequent pages of results by setting the pageToken query parameter to the value of the nextPageToken.", - "parameters": { - "aggregator": { - "type": "string", - "description": "The aggregation function that will reduce the data points in each window to a single point. This parameter is only valid for non-cumulative metrics with a value type of INT64 or DOUBLE.", - "enum": [ - "max", - "mean", - "min", - "sum" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "location": "query" - }, - "count": { - "type": "integer", - "description": "Maximum number of data points per page, which is used for pagination of results.", - "default": "6000", - "format": "int32", - "minimum": "1", - "maximum": "12000", - "location": "query" - }, - "labels": { - "type": "string", - "description": "A collection of labels for the matching time series, which are represented as: \n- key==value: key equals the value \n- key=~value: key regex matches the value \n- key!=value: key does not equal the value \n- key!~value: key regex does not match the value For example, to list all of the time series descriptors for the region us-central1, you could specify:\nlabel=cloud.googleapis.com%2Flocation=~us-central1.*", - "pattern": "(.+?)(==|=~|!=|!~)(.+)", - "repeated": true, - "location": "query" - }, - "metric": { - "type": "string", - "description": "Metric names are protocol-free URLs as listed in the Supported Metrics page. For example, compute.googleapis.com/instance/disk/read_ops_count.", - "required": true, - "location": "path" - }, - "oldest": { - "type": "string", - "description": "Start of the time interval (exclusive), which is expressed as an RFC 3339 timestamp. If neither oldest nor timespan is specified, the default time interval will be (youngest - 4 hours, youngest]", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "The pagination token, which is used to page through large result sets. Set this value to the value of the nextPageToken to retrieve the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "The project ID to which this time series belongs. The value can be the numeric project ID or string-based project name.", - "required": true, - "location": "path" - }, - "timespan": { - "type": "string", - "description": "Length of the time interval to query, which is an alternative way to declare the interval: (youngest - timespan, youngest]. The timespan and oldest parameters should not be used together. Units: \n- s: second \n- m: minute \n- h: hour \n- d: day \n- w: week Examples: 2s, 3m, 4w. Only one unit is allowed, for example: 2w3d is not allowed; you should use 17d instead.\n\nIf neither oldest nor timespan is specified, the default time interval will be (youngest - 4 hours, youngest].", - "pattern": "[0-9]+[smhdw]?", - "location": "query" - }, - "window": { - "type": "string", - "description": "The sampling window. At most one data point will be returned for each window in the requested time interval. This parameter is only valid for non-cumulative metric types. Units: \n- m: minute \n- h: hour \n- d: day \n- w: week Examples: 3m, 4w. 
Only one unit is allowed, for example: 2w3d is not allowed; you should use 17d instead.", - "pattern": "[0-9]+[mhdw]?", - "location": "query" - }, - "youngest": { - "type": "string", - "description": "End of the time interval (inclusive), which is expressed as an RFC 3339 timestamp.", - "required": true, - "location": "query" - } - }, - "parameterOrder": [ - "project", - "metric", - "youngest" - ], - "request": { - "$ref": "ListTimeseriesRequest" - }, - "response": { - "$ref": "ListTimeseriesResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ] - }, - "write": { - "id": "cloudmonitoring.timeseries.write", - "path": "{project}/timeseries:write", - "httpMethod": "POST", - "description": "Put data points to one or more time series for one or more metrics. If a time series does not exist, a new time series will be created. It is not allowed to write a time series point that is older than the existing youngest point of that time series. Points that are older than the existing youngest point of that time series will be discarded silently. Therefore, users should make sure that points of a time series are written sequentially in the order of their end time.", - "parameters": { - "project": { - "type": "string", - "description": "The project ID. The value can be the numeric project ID or string-based project name.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "WriteTimeseriesRequest" - }, - "response": { - "$ref": "WriteTimeseriesResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ] - } - } - }, - "timeseriesDescriptors": { - "methods": { - "list": { - "id": "cloudmonitoring.timeseriesDescriptors.list", - "path": "{project}/timeseriesDescriptors/{metric}", - "httpMethod": "GET", - "description": "List the descriptors of the time series that match the metric and labels values and that have data points in the interval. Large responses are paginated; use the nextPageToken returned in the response to request subsequent pages of results by setting the pageToken query parameter to the value of the nextPageToken.", - "parameters": { - "aggregator": { - "type": "string", - "description": "The aggregation function that will reduce the data points in each window to a single point. This parameter is only valid for non-cumulative metrics with a value type of INT64 or DOUBLE.", - "enum": [ - "max", - "mean", - "min", - "sum" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "location": "query" - }, - "count": { - "type": "integer", - "description": "Maximum number of time series descriptors per page. Used for pagination. 
If not specified, count = 100.", - "default": "100", - "format": "int32", - "minimum": "1", - "maximum": "1000", - "location": "query" - }, - "labels": { - "type": "string", - "description": "A collection of labels for the matching time series, which are represented as: \n- key==value: key equals the value \n- key=~value: key regex matches the value \n- key!=value: key does not equal the value \n- key!~value: key regex does not match the value For example, to list all of the time series descriptors for the region us-central1, you could specify:\nlabel=cloud.googleapis.com%2Flocation=~us-central1.*", - "pattern": "(.+?)(==|=~|!=|!~)(.+)", - "repeated": true, - "location": "query" - }, - "metric": { - "type": "string", - "description": "Metric names are protocol-free URLs as listed in the Supported Metrics page. For example, compute.googleapis.com/instance/disk/read_ops_count.", - "required": true, - "location": "path" - }, - "oldest": { - "type": "string", - "description": "Start of the time interval (exclusive), which is expressed as an RFC 3339 timestamp. If neither oldest nor timespan is specified, the default time interval will be (youngest - 4 hours, youngest]", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "The pagination token, which is used to page through large result sets. Set this value to the value of the nextPageToken to retrieve the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "The project ID to which this time series belongs. The value can be the numeric project ID or string-based project name.", - "required": true, - "location": "path" - }, - "timespan": { - "type": "string", - "description": "Length of the time interval to query, which is an alternative way to declare the interval: (youngest - timespan, youngest]. The timespan and oldest parameters should not be used together. Units: \n- s: second \n- m: minute \n- h: hour \n- d: day \n- w: week Examples: 2s, 3m, 4w. Only one unit is allowed, for example: 2w3d is not allowed; you should use 17d instead.\n\nIf neither oldest nor timespan is specified, the default time interval will be (youngest - 4 hours, youngest].", - "pattern": "[0-9]+[smhdw]?", - "location": "query" - }, - "window": { - "type": "string", - "description": "The sampling window. At most one data point will be returned for each window in the requested time interval. This parameter is only valid for non-cumulative metric types. Units: \n- m: minute \n- h: hour \n- d: day \n- w: week Examples: 3m, 4w. 
Only one unit is allowed, for example: 2w3d is not allowed; you should use 17d instead.", - "pattern": "[0-9]+[mhdw]?", - "location": "query" - }, - "youngest": { - "type": "string", - "description": "End of the time interval (inclusive), which is expressed as an RFC 3339 timestamp.", - "required": true, - "location": "query" - } - }, - "parameterOrder": [ - "project", - "metric", - "youngest" - ], - "request": { - "$ref": "ListTimeseriesDescriptorsRequest" - }, - "response": { - "$ref": "ListTimeseriesDescriptorsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ] - } - } - } - } -} diff --git a/vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-gen.go b/vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-gen.go deleted file mode 100644 index 57208fccd3f..00000000000 --- a/vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-gen.go +++ /dev/null @@ -1,2247 +0,0 @@ -// Package cloudmonitoring provides access to the Cloud Monitoring API. -// -// See https://cloud.google.com/monitoring/v2beta2/ -// -// Usage example: -// -// import "google.golang.org/api/cloudmonitoring/v2beta2" -// ... -// cloudmonitoringService, err := cloudmonitoring.New(oauthHttpClient) -package cloudmonitoring // import "google.golang.org/api/cloudmonitoring/v2beta2" - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - context "golang.org/x/net/context" - ctxhttp "golang.org/x/net/context/ctxhttp" - gensupport "google.golang.org/api/gensupport" - googleapi "google.golang.org/api/googleapi" - "io" - "net/http" - "net/url" - "strconv" - "strings" -) - -// Always reference these packages, just in case the auto-generated code -// below doesn't. -var _ = bytes.NewBuffer -var _ = strconv.Itoa -var _ = fmt.Sprintf -var _ = json.NewDecoder -var _ = io.Copy -var _ = url.Parse -var _ = gensupport.MarshalJSON -var _ = googleapi.Version -var _ = errors.New -var _ = strings.Replace -var _ = context.Canceled -var _ = ctxhttp.Do - -const apiId = "cloudmonitoring:v2beta2" -const apiName = "cloudmonitoring" -const apiVersion = "v2beta2" -const basePath = "https://www.googleapis.com/cloudmonitoring/v2beta2/projects/" - -// OAuth2 scopes used by this API. 
-const ( - // View and manage your data across Google Cloud Platform services - CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" - - // View and write monitoring data for all of your Google and third-party - // Cloud and API projects - MonitoringScope = "https://www.googleapis.com/auth/monitoring" -) - -func New(client *http.Client) (*Service, error) { - if client == nil { - return nil, errors.New("client is nil") - } - s := &Service{client: client, BasePath: basePath} - s.MetricDescriptors = NewMetricDescriptorsService(s) - s.Timeseries = NewTimeseriesService(s) - s.TimeseriesDescriptors = NewTimeseriesDescriptorsService(s) - return s, nil -} - -type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment - - MetricDescriptors *MetricDescriptorsService - - Timeseries *TimeseriesService - - TimeseriesDescriptors *TimeseriesDescriptorsService -} - -func (s *Service) userAgent() string { - if s.UserAgent == "" { - return googleapi.UserAgent - } - return googleapi.UserAgent + " " + s.UserAgent -} - -func NewMetricDescriptorsService(s *Service) *MetricDescriptorsService { - rs := &MetricDescriptorsService{s: s} - return rs -} - -type MetricDescriptorsService struct { - s *Service -} - -func NewTimeseriesService(s *Service) *TimeseriesService { - rs := &TimeseriesService{s: s} - return rs -} - -type TimeseriesService struct { - s *Service -} - -func NewTimeseriesDescriptorsService(s *Service) *TimeseriesDescriptorsService { - rs := &TimeseriesDescriptorsService{s: s} - return rs -} - -type TimeseriesDescriptorsService struct { - s *Service -} - -// DeleteMetricDescriptorResponse: The response of -// cloudmonitoring.metricDescriptors.delete. -type DeleteMetricDescriptorResponse struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "cloudmonitoring#deleteMetricDescriptorResponse". - Kind string `json:"kind,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *DeleteMetricDescriptorResponse) MarshalJSON() ([]byte, error) { - type noMethod DeleteMetricDescriptorResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListMetricDescriptorsRequest: The request of -// cloudmonitoring.metricDescriptors.list. -type ListMetricDescriptorsRequest struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "cloudmonitoring#listMetricDescriptorsRequest". 
- Kind string `json:"kind,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListMetricDescriptorsRequest) MarshalJSON() ([]byte, error) { - type noMethod ListMetricDescriptorsRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListMetricDescriptorsResponse: The response of -// cloudmonitoring.metricDescriptors.list. -type ListMetricDescriptorsResponse struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "cloudmonitoring#listMetricDescriptorsResponse". - Kind string `json:"kind,omitempty"` - - // Metrics: The returned metric descriptors. - Metrics []*MetricDescriptor `json:"metrics,omitempty"` - - // NextPageToken: Pagination token. If present, indicates that - // additional results are available for retrieval. To access the results - // past the pagination limit, pass this value to the pageToken query - // parameter. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListMetricDescriptorsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListMetricDescriptorsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListTimeseriesDescriptorsRequest: The request of -// cloudmonitoring.timeseriesDescriptors.list -type ListTimeseriesDescriptorsRequest struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "cloudmonitoring#listTimeseriesDescriptorsRequest". - Kind string `json:"kind,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. 
By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListTimeseriesDescriptorsRequest) MarshalJSON() ([]byte, error) { - type noMethod ListTimeseriesDescriptorsRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListTimeseriesDescriptorsResponse: The response of -// cloudmonitoring.timeseriesDescriptors.list -type ListTimeseriesDescriptorsResponse struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "cloudmonitoring#listTimeseriesDescriptorsResponse". - Kind string `json:"kind,omitempty"` - - // NextPageToken: Pagination token. If present, indicates that - // additional results are available for retrieval. To access the results - // past the pagination limit, set this value to the pageToken query - // parameter. - NextPageToken string `json:"nextPageToken,omitempty"` - - // Oldest: The oldest timestamp of the interval of this query, as an RFC - // 3339 string. - Oldest string `json:"oldest,omitempty"` - - // Timeseries: The returned time series descriptors. - Timeseries []*TimeseriesDescriptor `json:"timeseries,omitempty"` - - // Youngest: The youngest timestamp of the interval of this query, as an - // RFC 3339 string. - Youngest string `json:"youngest,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListTimeseriesDescriptorsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListTimeseriesDescriptorsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListTimeseriesRequest: The request of cloudmonitoring.timeseries.list -type ListTimeseriesRequest struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "cloudmonitoring#listTimeseriesRequest". 
- Kind string `json:"kind,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListTimeseriesRequest) MarshalJSON() ([]byte, error) { - type noMethod ListTimeseriesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListTimeseriesResponse: The response of -// cloudmonitoring.timeseries.list -type ListTimeseriesResponse struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "cloudmonitoring#listTimeseriesResponse". - Kind string `json:"kind,omitempty"` - - // NextPageToken: Pagination token. If present, indicates that - // additional results are available for retrieval. To access the results - // past the pagination limit, set the pageToken query parameter to this - // value. All of the points of a time series will be returned before - // returning any point of the subsequent time series. - NextPageToken string `json:"nextPageToken,omitempty"` - - // Oldest: The oldest timestamp of the interval of this query as an RFC - // 3339 string. - Oldest string `json:"oldest,omitempty"` - - // Timeseries: The returned time series. - Timeseries []*Timeseries `json:"timeseries,omitempty"` - - // Youngest: The youngest timestamp of the interval of this query as an - // RFC 3339 string. - Youngest string `json:"youngest,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *ListTimeseriesResponse) MarshalJSON() ([]byte, error) { - type noMethod ListTimeseriesResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// MetricDescriptor: A metricDescriptor defines the name, label keys, -// and data type of a particular metric. -type MetricDescriptor struct { - // Description: Description of this metric. - Description string `json:"description,omitempty"` - - // Labels: Labels defined for this metric. - Labels []*MetricDescriptorLabelDescriptor `json:"labels,omitempty"` - - // Name: The name of this metric. - Name string `json:"name,omitempty"` - - // Project: The project ID to which the metric belongs. - Project string `json:"project,omitempty"` - - // TypeDescriptor: Type description for this metric. - TypeDescriptor *MetricDescriptorTypeDescriptor `json:"typeDescriptor,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *MetricDescriptor) MarshalJSON() ([]byte, error) { - type noMethod MetricDescriptor - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// MetricDescriptorLabelDescriptor: A label in a metric is a description -// of this metric, including the key of this description (what the -// description is), and the value for this description. -type MetricDescriptorLabelDescriptor struct { - // Description: Label description. - Description string `json:"description,omitempty"` - - // Key: Label key. - Key string `json:"key,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *MetricDescriptorLabelDescriptor) MarshalJSON() ([]byte, error) { - type noMethod MetricDescriptorLabelDescriptor - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// MetricDescriptorTypeDescriptor: A type in a metric contains -// information about how the metric is collected and what its data -// points look like. -type MetricDescriptorTypeDescriptor struct { - // MetricType: The method of collecting data for the metric. See Metric - // types. - MetricType string `json:"metricType,omitempty"` - - // ValueType: The data type of of individual points in the metric's time - // series. See Metric value types. - ValueType string `json:"valueType,omitempty"` - - // ForceSendFields is a list of field names (e.g. "MetricType") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "MetricType") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *MetricDescriptorTypeDescriptor) MarshalJSON() ([]byte, error) { - type noMethod MetricDescriptorTypeDescriptor - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Point: Point is a single point in a time series. It consists of a -// start time, an end time, and a value. -type Point struct { - // BoolValue: The value of this data point. Either "true" or "false". - BoolValue *bool `json:"boolValue,omitempty"` - - // DistributionValue: The value of this data point as a distribution. A - // distribution value can contain a list of buckets and/or an - // underflowBucket and an overflowBucket. The values of these points can - // be used to create a histogram. - DistributionValue *PointDistribution `json:"distributionValue,omitempty"` - - // DoubleValue: The value of this data point as a double-precision - // floating-point number. - DoubleValue *float64 `json:"doubleValue,omitempty"` - - // End: The interval [start, end] is the time period to which the - // point's value applies. For gauge metrics, whose values are - // instantaneous measurements, this interval should be empty (start - // should equal end). For cumulative metrics (of which deltas and rates - // are special cases), the interval should be non-empty. Both start and - // end are RFC 3339 strings. - End string `json:"end,omitempty"` - - // Int64Value: The value of this data point as a 64-bit integer. - Int64Value *int64 `json:"int64Value,omitempty,string"` - - // Start: The interval [start, end] is the time period to which the - // point's value applies. For gauge metrics, whose values are - // instantaneous measurements, this interval should be empty (start - // should equal end). For cumulative metrics (of which deltas and rates - // are special cases), the interval should be non-empty. 
Both start and - // end are RFC 3339 strings. - Start string `json:"start,omitempty"` - - // StringValue: The value of this data point in string format. - StringValue *string `json:"stringValue,omitempty"` - - // ForceSendFields is a list of field names (e.g. "BoolValue") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "BoolValue") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Point) MarshalJSON() ([]byte, error) { - type noMethod Point - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *Point) UnmarshalJSON(data []byte) error { - type noMethod Point - var s1 struct { - DoubleValue *gensupport.JSONFloat64 `json:"doubleValue"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - if s1.DoubleValue != nil { - s.DoubleValue = (*float64)(s1.DoubleValue) - } - return nil -} - -// PointDistribution: Distribution data point value type. When writing -// distribution points, try to be consistent with the boundaries of your -// buckets. If you must modify the bucket boundaries, then do so by -// merging, partitioning, or appending rather than skewing them. -type PointDistribution struct { - // Buckets: The finite buckets. - Buckets []*PointDistributionBucket `json:"buckets,omitempty"` - - // OverflowBucket: The overflow bucket. - OverflowBucket *PointDistributionOverflowBucket `json:"overflowBucket,omitempty"` - - // UnderflowBucket: The underflow bucket. - UnderflowBucket *PointDistributionUnderflowBucket `json:"underflowBucket,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Buckets") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Buckets") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *PointDistribution) MarshalJSON() ([]byte, error) { - type noMethod PointDistribution - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// PointDistributionBucket: The histogram's bucket. Buckets that form -// the histogram of a distribution value. 
If the upper bound of a -// bucket, say U1, does not equal the lower bound of the next bucket, -// say L2, this means that there is no event in [U1, L2). -type PointDistributionBucket struct { - // Count: The number of events whose values are in the interval defined - // by this bucket. - Count int64 `json:"count,omitempty,string"` - - // LowerBound: The lower bound of the value interval of this bucket - // (inclusive). - LowerBound float64 `json:"lowerBound,omitempty"` - - // UpperBound: The upper bound of the value interval of this bucket - // (exclusive). - UpperBound float64 `json:"upperBound,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Count") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Count") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *PointDistributionBucket) MarshalJSON() ([]byte, error) { - type noMethod PointDistributionBucket - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *PointDistributionBucket) UnmarshalJSON(data []byte) error { - type noMethod PointDistributionBucket - var s1 struct { - LowerBound gensupport.JSONFloat64 `json:"lowerBound"` - UpperBound gensupport.JSONFloat64 `json:"upperBound"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.LowerBound = float64(s1.LowerBound) - s.UpperBound = float64(s1.UpperBound) - return nil -} - -// PointDistributionOverflowBucket: The overflow bucket is a special -// bucket that does not have the upperBound field; it includes all of -// the events that are no less than its lower bound. -type PointDistributionOverflowBucket struct { - // Count: The number of events whose values are in the interval defined - // by this bucket. - Count int64 `json:"count,omitempty,string"` - - // LowerBound: The lower bound of the value interval of this bucket - // (inclusive). - LowerBound float64 `json:"lowerBound,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Count") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Count") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. 
- // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *PointDistributionOverflowBucket) MarshalJSON() ([]byte, error) { - type noMethod PointDistributionOverflowBucket - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *PointDistributionOverflowBucket) UnmarshalJSON(data []byte) error { - type noMethod PointDistributionOverflowBucket - var s1 struct { - LowerBound gensupport.JSONFloat64 `json:"lowerBound"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.LowerBound = float64(s1.LowerBound) - return nil -} - -// PointDistributionUnderflowBucket: The underflow bucket is a special -// bucket that does not have the lowerBound field; it includes all of -// the events that are less than its upper bound. -type PointDistributionUnderflowBucket struct { - // Count: The number of events whose values are in the interval defined - // by this bucket. - Count int64 `json:"count,omitempty,string"` - - // UpperBound: The upper bound of the value interval of this bucket - // (exclusive). - UpperBound float64 `json:"upperBound,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Count") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Count") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *PointDistributionUnderflowBucket) MarshalJSON() ([]byte, error) { - type noMethod PointDistributionUnderflowBucket - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *PointDistributionUnderflowBucket) UnmarshalJSON(data []byte) error { - type noMethod PointDistributionUnderflowBucket - var s1 struct { - UpperBound gensupport.JSONFloat64 `json:"upperBound"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.UpperBound = float64(s1.UpperBound) - return nil -} - -// Timeseries: The monitoring data is organized as metrics and stored as -// data points that are recorded over time. Each data point represents -// information like the CPU utilization of your virtual machine. A -// historical record of these data points is called a time series. -type Timeseries struct { - // Points: The data points of this time series. The points are listed in - // order of their end timestamp, from younger to older. - Points []*Point `json:"points,omitempty"` - - // TimeseriesDesc: The descriptor of this time series. - TimeseriesDesc *TimeseriesDescriptor `json:"timeseriesDesc,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Points") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Points") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Timeseries) MarshalJSON() ([]byte, error) { - type noMethod Timeseries - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TimeseriesDescriptor: TimeseriesDescriptor identifies a single time -// series. -type TimeseriesDescriptor struct { - // Labels: The label's name. - Labels map[string]string `json:"labels,omitempty"` - - // Metric: The name of the metric. - Metric string `json:"metric,omitempty"` - - // Project: The Developers Console project number to which this time - // series belongs. - Project string `json:"project,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Labels") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Labels") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TimeseriesDescriptor) MarshalJSON() ([]byte, error) { - type noMethod TimeseriesDescriptor - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TimeseriesDescriptorLabel struct { - // Key: The label's name. - Key string `json:"key,omitempty"` - - // Value: The label's value. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *TimeseriesDescriptorLabel) MarshalJSON() ([]byte, error) { - type noMethod TimeseriesDescriptorLabel - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TimeseriesPoint: When writing time series, TimeseriesPoint should be -// used instead of Timeseries, to enforce single point for each time -// series in the timeseries.write request. -type TimeseriesPoint struct { - // Point: The data point in this time series snapshot. - Point *Point `json:"point,omitempty"` - - // TimeseriesDesc: The descriptor of this time series. - TimeseriesDesc *TimeseriesDescriptor `json:"timeseriesDesc,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Point") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Point") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TimeseriesPoint) MarshalJSON() ([]byte, error) { - type noMethod TimeseriesPoint - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// WriteTimeseriesRequest: The request of -// cloudmonitoring.timeseries.write -type WriteTimeseriesRequest struct { - // CommonLabels: The label's name. - CommonLabels map[string]string `json:"commonLabels,omitempty"` - - // Timeseries: Provide time series specific labels and the data points - // for each time series. The labels in timeseries and the common_labels - // should form a complete list of labels that required by the metric. - Timeseries []*TimeseriesPoint `json:"timeseries,omitempty"` - - // ForceSendFields is a list of field names (e.g. "CommonLabels") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CommonLabels") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *WriteTimeseriesRequest) MarshalJSON() ([]byte, error) { - type noMethod WriteTimeseriesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// WriteTimeseriesResponse: The response of -// cloudmonitoring.timeseries.write -type WriteTimeseriesResponse struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "cloudmonitoring#writeTimeseriesResponse". - Kind string `json:"kind,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *WriteTimeseriesResponse) MarshalJSON() ([]byte, error) { - type noMethod WriteTimeseriesResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// method id "cloudmonitoring.metricDescriptors.create": - -type MetricDescriptorsCreateCall struct { - s *Service - project string - metricdescriptor *MetricDescriptor - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Create a new metric. -func (r *MetricDescriptorsService) Create(project string, metricdescriptor *MetricDescriptor) *MetricDescriptorsCreateCall { - c := &MetricDescriptorsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.metricdescriptor = metricdescriptor - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *MetricDescriptorsCreateCall) Fields(s ...googleapi.Field) *MetricDescriptorsCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *MetricDescriptorsCreateCall) Context(ctx context.Context) *MetricDescriptorsCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *MetricDescriptorsCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *MetricDescriptorsCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metricdescriptor) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/metricDescriptors") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudmonitoring.metricDescriptors.create" call. -// Exactly one of *MetricDescriptor or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *MetricDescriptor.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *MetricDescriptorsCreateCall) Do(opts ...googleapi.CallOption) (*MetricDescriptor, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &MetricDescriptor{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Create a new metric.", - // "httpMethod": "POST", - // "id": "cloudmonitoring.metricDescriptors.create", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "The project id. The value can be the numeric project ID or string-based project name.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/metricDescriptors", - // "request": { - // "$ref": "MetricDescriptor" - // }, - // "response": { - // "$ref": "MetricDescriptor" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" - // ] - // } - -} - -// method id "cloudmonitoring.metricDescriptors.delete": - -type MetricDescriptorsDeleteCall struct { - s *Service - project string - metric string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Delete an existing metric. -func (r *MetricDescriptorsService) Delete(project string, metric string) *MetricDescriptorsDeleteCall { - c := &MetricDescriptorsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.metric = metric - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *MetricDescriptorsDeleteCall) Fields(s ...googleapi.Field) *MetricDescriptorsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *MetricDescriptorsDeleteCall) Context(ctx context.Context) *MetricDescriptorsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *MetricDescriptorsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *MetricDescriptorsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/metricDescriptors/{metric}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "metric": c.metric, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudmonitoring.metricDescriptors.delete" call. -// Exactly one of *DeleteMetricDescriptorResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *DeleteMetricDescriptorResponse.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *MetricDescriptorsDeleteCall) Do(opts ...googleapi.CallOption) (*DeleteMetricDescriptorResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &DeleteMetricDescriptorResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Delete an existing metric.", - // "httpMethod": "DELETE", - // "id": "cloudmonitoring.metricDescriptors.delete", - // "parameterOrder": [ - // "project", - // "metric" - // ], - // "parameters": { - // "metric": { - // "description": "Name of the metric.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "The project ID to which the metric belongs.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/metricDescriptors/{metric}", - // "response": { - // "$ref": "DeleteMetricDescriptorResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" - // ] - // } - -} - -// method id "cloudmonitoring.metricDescriptors.list": - -type MetricDescriptorsListCall struct { - s *Service - project string - listmetricdescriptorsrequest *ListMetricDescriptorsRequest - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: List metric descriptors that match the query. If the query is -// not set, then all of the metric descriptors will be returned. Large -// responses will be paginated, use the nextPageToken returned in the -// response to request subsequent pages of results by setting the -// pageToken query parameter to the value of the nextPageToken. -func (r *MetricDescriptorsService) List(project string, listmetricdescriptorsrequest *ListMetricDescriptorsRequest) *MetricDescriptorsListCall { - c := &MetricDescriptorsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.listmetricdescriptorsrequest = listmetricdescriptorsrequest - return c -} - -// Count sets the optional parameter "count": Maximum number of metric -// descriptors per page. Used for pagination. If not specified, count = -// 100. -func (c *MetricDescriptorsListCall) Count(count int64) *MetricDescriptorsListCall { - c.urlParams_.Set("count", fmt.Sprint(count)) - return c -} - -// PageToken sets the optional parameter "pageToken": The pagination -// token, which is used to page through large result sets. Set this -// value to the value of the nextPageToken to retrieve the next page of -// results. -func (c *MetricDescriptorsListCall) PageToken(pageToken string) *MetricDescriptorsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Query sets the optional parameter "query": The query used to search -// against existing metrics. Separate keywords with a space; the service -// joins all keywords with AND, meaning that all keywords must match for -// a metric to be returned. If this field is omitted, all metrics are -// returned. If an empty string is passed with this field, no metrics -// are returned. 
-func (c *MetricDescriptorsListCall) Query(query string) *MetricDescriptorsListCall { - c.urlParams_.Set("query", query) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *MetricDescriptorsListCall) Fields(s ...googleapi.Field) *MetricDescriptorsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MetricDescriptorsListCall) IfNoneMatch(entityTag string) *MetricDescriptorsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *MetricDescriptorsListCall) Context(ctx context.Context) *MetricDescriptorsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *MetricDescriptorsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *MetricDescriptorsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/metricDescriptors") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudmonitoring.metricDescriptors.list" call. -// Exactly one of *ListMetricDescriptorsResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ListMetricDescriptorsResponse.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *MetricDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListMetricDescriptorsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListMetricDescriptorsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "List metric descriptors that match the query. 
If the query is not set, then all of the metric descriptors will be returned. Large responses will be paginated, use the nextPageToken returned in the response to request subsequent pages of results by setting the pageToken query parameter to the value of the nextPageToken.", - // "httpMethod": "GET", - // "id": "cloudmonitoring.metricDescriptors.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "count": { - // "default": "100", - // "description": "Maximum number of metric descriptors per page. Used for pagination. If not specified, count = 100.", - // "format": "int32", - // "location": "query", - // "maximum": "1000", - // "minimum": "1", - // "type": "integer" - // }, - // "pageToken": { - // "description": "The pagination token, which is used to page through large result sets. Set this value to the value of the nextPageToken to retrieve the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "The project id. The value can be the numeric project ID or string-based project name.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "query": { - // "description": "The query used to search against existing metrics. Separate keywords with a space; the service joins all keywords with AND, meaning that all keywords must match for a metric to be returned. If this field is omitted, all metrics are returned. If an empty string is passed with this field, no metrics are returned.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/metricDescriptors", - // "request": { - // "$ref": "ListMetricDescriptorsRequest" - // }, - // "response": { - // "$ref": "ListMetricDescriptorsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *MetricDescriptorsListCall) Pages(ctx context.Context, f func(*ListMetricDescriptorsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "cloudmonitoring.timeseries.list": - -type TimeseriesListCall struct { - s *Service - project string - metric string - listtimeseriesrequest *ListTimeseriesRequest - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: List the data points of the time series that match the metric -// and labels values and that have data points in the interval. Large -// responses are paginated; use the nextPageToken returned in the -// response to request subsequent pages of results by setting the -// pageToken query parameter to the value of the nextPageToken. 
-func (r *TimeseriesService) List(project string, metric string, youngest string, listtimeseriesrequest *ListTimeseriesRequest) *TimeseriesListCall { - c := &TimeseriesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.metric = metric - c.urlParams_.Set("youngest", youngest) - c.listtimeseriesrequest = listtimeseriesrequest - return c -} - -// Aggregator sets the optional parameter "aggregator": The aggregation -// function that will reduce the data points in each window to a single -// point. This parameter is only valid for non-cumulative metrics with a -// value type of INT64 or DOUBLE. -// -// Possible values: -// "max" -// "mean" -// "min" -// "sum" -func (c *TimeseriesListCall) Aggregator(aggregator string) *TimeseriesListCall { - c.urlParams_.Set("aggregator", aggregator) - return c -} - -// Count sets the optional parameter "count": Maximum number of data -// points per page, which is used for pagination of results. -func (c *TimeseriesListCall) Count(count int64) *TimeseriesListCall { - c.urlParams_.Set("count", fmt.Sprint(count)) - return c -} - -// Labels sets the optional parameter "labels": A collection of labels -// for the matching time series, which are represented as: -// - key==value: key equals the value -// - key=~value: key regex matches the value -// - key!=value: key does not equal the value -// - key!~value: key regex does not match the value For example, to -// list all of the time series descriptors for the region us-central1, -// you could -// specify: -// label=cloud.googleapis.com%2Flocation=~us-central1.* -func (c *TimeseriesListCall) Labels(labels ...string) *TimeseriesListCall { - c.urlParams_.SetMulti("labels", append([]string{}, labels...)) - return c -} - -// Oldest sets the optional parameter "oldest": Start of the time -// interval (exclusive), which is expressed as an RFC 3339 timestamp. If -// neither oldest nor timespan is specified, the default time interval -// will be (youngest - 4 hours, youngest] -func (c *TimeseriesListCall) Oldest(oldest string) *TimeseriesListCall { - c.urlParams_.Set("oldest", oldest) - return c -} - -// PageToken sets the optional parameter "pageToken": The pagination -// token, which is used to page through large result sets. Set this -// value to the value of the nextPageToken to retrieve the next page of -// results. -func (c *TimeseriesListCall) PageToken(pageToken string) *TimeseriesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Timespan sets the optional parameter "timespan": Length of the time -// interval to query, which is an alternative way to declare the -// interval: (youngest - timespan, youngest]. The timespan and oldest -// parameters should not be used together. Units: -// - s: second -// - m: minute -// - h: hour -// - d: day -// - w: week Examples: 2s, 3m, 4w. Only one unit is allowed, for -// example: 2w3d is not allowed; you should use 17d instead. -// -// If neither oldest nor timespan is specified, the default time -// interval will be (youngest - 4 hours, youngest]. -func (c *TimeseriesListCall) Timespan(timespan string) *TimeseriesListCall { - c.urlParams_.Set("timespan", timespan) - return c -} - -// Window sets the optional parameter "window": The sampling window. At -// most one data point will be returned for each window in the requested -// time interval. This parameter is only valid for non-cumulative metric -// types. Units: -// - m: minute -// - h: hour -// - d: day -// - w: week Examples: 3m, 4w. 
Only one unit is allowed, for example: -// 2w3d is not allowed; you should use 17d instead. -func (c *TimeseriesListCall) Window(window string) *TimeseriesListCall { - c.urlParams_.Set("window", window) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TimeseriesListCall) Fields(s ...googleapi.Field) *TimeseriesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TimeseriesListCall) IfNoneMatch(entityTag string) *TimeseriesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TimeseriesListCall) Context(ctx context.Context) *TimeseriesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TimeseriesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TimeseriesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/timeseries/{metric}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "metric": c.metric, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudmonitoring.timeseries.list" call. -// Exactly one of *ListTimeseriesResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListTimeseriesResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TimeseriesListCall) Do(opts ...googleapi.CallOption) (*ListTimeseriesResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListTimeseriesResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "List the data points of the time series that match the metric and labels values and that have data points in the interval. Large responses are paginated; use the nextPageToken returned in the response to request subsequent pages of results by setting the pageToken query parameter to the value of the nextPageToken.", - // "httpMethod": "GET", - // "id": "cloudmonitoring.timeseries.list", - // "parameterOrder": [ - // "project", - // "metric", - // "youngest" - // ], - // "parameters": { - // "aggregator": { - // "description": "The aggregation function that will reduce the data points in each window to a single point. This parameter is only valid for non-cumulative metrics with a value type of INT64 or DOUBLE.", - // "enum": [ - // "max", - // "mean", - // "min", - // "sum" - // ], - // "enumDescriptions": [ - // "", - // "", - // "", - // "" - // ], - // "location": "query", - // "type": "string" - // }, - // "count": { - // "default": "6000", - // "description": "Maximum number of data points per page, which is used for pagination of results.", - // "format": "int32", - // "location": "query", - // "maximum": "12000", - // "minimum": "1", - // "type": "integer" - // }, - // "labels": { - // "description": "A collection of labels for the matching time series, which are represented as: \n- key==value: key equals the value \n- key=~value: key regex matches the value \n- key!=value: key does not equal the value \n- key!~value: key regex does not match the value For example, to list all of the time series descriptors for the region us-central1, you could specify:\nlabel=cloud.googleapis.com%2Flocation=~us-central1.*", - // "location": "query", - // "pattern": "(.+?)(==|=~|!=|!~)(.+)", - // "repeated": true, - // "type": "string" - // }, - // "metric": { - // "description": "Metric names are protocol-free URLs as listed in the Supported Metrics page. For example, compute.googleapis.com/instance/disk/read_ops_count.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "oldest": { - // "description": "Start of the time interval (exclusive), which is expressed as an RFC 3339 timestamp. If neither oldest nor timespan is specified, the default time interval will be (youngest - 4 hours, youngest]", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "The pagination token, which is used to page through large result sets. Set this value to the value of the nextPageToken to retrieve the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "The project ID to which this time series belongs. 
The value can be the numeric project ID or string-based project name.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "timespan": { - // "description": "Length of the time interval to query, which is an alternative way to declare the interval: (youngest - timespan, youngest]. The timespan and oldest parameters should not be used together. Units: \n- s: second \n- m: minute \n- h: hour \n- d: day \n- w: week Examples: 2s, 3m, 4w. Only one unit is allowed, for example: 2w3d is not allowed; you should use 17d instead.\n\nIf neither oldest nor timespan is specified, the default time interval will be (youngest - 4 hours, youngest].", - // "location": "query", - // "pattern": "[0-9]+[smhdw]?", - // "type": "string" - // }, - // "window": { - // "description": "The sampling window. At most one data point will be returned for each window in the requested time interval. This parameter is only valid for non-cumulative metric types. Units: \n- m: minute \n- h: hour \n- d: day \n- w: week Examples: 3m, 4w. Only one unit is allowed, for example: 2w3d is not allowed; you should use 17d instead.", - // "location": "query", - // "pattern": "[0-9]+[mhdw]?", - // "type": "string" - // }, - // "youngest": { - // "description": "End of the time interval (inclusive), which is expressed as an RFC 3339 timestamp.", - // "location": "query", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/timeseries/{metric}", - // "request": { - // "$ref": "ListTimeseriesRequest" - // }, - // "response": { - // "$ref": "ListTimeseriesResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *TimeseriesListCall) Pages(ctx context.Context, f func(*ListTimeseriesResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "cloudmonitoring.timeseries.write": - -type TimeseriesWriteCall struct { - s *Service - project string - writetimeseriesrequest *WriteTimeseriesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Write: Put data points to one or more time series for one or more -// metrics. If a time series does not exist, a new time series will be -// created. It is not allowed to write a time series point that is older -// than the existing youngest point of that time series. Points that are -// older than the existing youngest point of that time series will be -// discarded silently. Therefore, users should make sure that points of -// a time series are written sequentially in the order of their end -// time. -func (r *TimeseriesService) Write(project string, writetimeseriesrequest *WriteTimeseriesRequest) *TimeseriesWriteCall { - c := &TimeseriesWriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.writetimeseriesrequest = writetimeseriesrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *TimeseriesWriteCall) Fields(s ...googleapi.Field) *TimeseriesWriteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TimeseriesWriteCall) Context(ctx context.Context) *TimeseriesWriteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TimeseriesWriteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TimeseriesWriteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.writetimeseriesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/timeseries:write") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudmonitoring.timeseries.write" call. -// Exactly one of *WriteTimeseriesResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *WriteTimeseriesResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TimeseriesWriteCall) Do(opts ...googleapi.CallOption) (*WriteTimeseriesResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &WriteTimeseriesResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Put data points to one or more time series for one or more metrics. If a time series does not exist, a new time series will be created. It is not allowed to write a time series point that is older than the existing youngest point of that time series. Points that are older than the existing youngest point of that time series will be discarded silently. Therefore, users should make sure that points of a time series are written sequentially in the order of their end time.", - // "httpMethod": "POST", - // "id": "cloudmonitoring.timeseries.write", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "The project ID. 
The value can be the numeric project ID or string-based project name.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/timeseries:write", - // "request": { - // "$ref": "WriteTimeseriesRequest" - // }, - // "response": { - // "$ref": "WriteTimeseriesResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" - // ] - // } - -} - -// method id "cloudmonitoring.timeseriesDescriptors.list": - -type TimeseriesDescriptorsListCall struct { - s *Service - project string - metric string - listtimeseriesdescriptorsrequest *ListTimeseriesDescriptorsRequest - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: List the descriptors of the time series that match the metric -// and labels values and that have data points in the interval. Large -// responses are paginated; use the nextPageToken returned in the -// response to request subsequent pages of results by setting the -// pageToken query parameter to the value of the nextPageToken. -func (r *TimeseriesDescriptorsService) List(project string, metric string, youngest string, listtimeseriesdescriptorsrequest *ListTimeseriesDescriptorsRequest) *TimeseriesDescriptorsListCall { - c := &TimeseriesDescriptorsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.metric = metric - c.urlParams_.Set("youngest", youngest) - c.listtimeseriesdescriptorsrequest = listtimeseriesdescriptorsrequest - return c -} - -// Aggregator sets the optional parameter "aggregator": The aggregation -// function that will reduce the data points in each window to a single -// point. This parameter is only valid for non-cumulative metrics with a -// value type of INT64 or DOUBLE. -// -// Possible values: -// "max" -// "mean" -// "min" -// "sum" -func (c *TimeseriesDescriptorsListCall) Aggregator(aggregator string) *TimeseriesDescriptorsListCall { - c.urlParams_.Set("aggregator", aggregator) - return c -} - -// Count sets the optional parameter "count": Maximum number of time -// series descriptors per page. Used for pagination. If not specified, -// count = 100. -func (c *TimeseriesDescriptorsListCall) Count(count int64) *TimeseriesDescriptorsListCall { - c.urlParams_.Set("count", fmt.Sprint(count)) - return c -} - -// Labels sets the optional parameter "labels": A collection of labels -// for the matching time series, which are represented as: -// - key==value: key equals the value -// - key=~value: key regex matches the value -// - key!=value: key does not equal the value -// - key!~value: key regex does not match the value For example, to -// list all of the time series descriptors for the region us-central1, -// you could -// specify: -// label=cloud.googleapis.com%2Flocation=~us-central1.* -func (c *TimeseriesDescriptorsListCall) Labels(labels ...string) *TimeseriesDescriptorsListCall { - c.urlParams_.SetMulti("labels", append([]string{}, labels...)) - return c -} - -// Oldest sets the optional parameter "oldest": Start of the time -// interval (exclusive), which is expressed as an RFC 3339 timestamp. 
If -// neither oldest nor timespan is specified, the default time interval -// will be (youngest - 4 hours, youngest] -func (c *TimeseriesDescriptorsListCall) Oldest(oldest string) *TimeseriesDescriptorsListCall { - c.urlParams_.Set("oldest", oldest) - return c -} - -// PageToken sets the optional parameter "pageToken": The pagination -// token, which is used to page through large result sets. Set this -// value to the value of the nextPageToken to retrieve the next page of -// results. -func (c *TimeseriesDescriptorsListCall) PageToken(pageToken string) *TimeseriesDescriptorsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Timespan sets the optional parameter "timespan": Length of the time -// interval to query, which is an alternative way to declare the -// interval: (youngest - timespan, youngest]. The timespan and oldest -// parameters should not be used together. Units: -// - s: second -// - m: minute -// - h: hour -// - d: day -// - w: week Examples: 2s, 3m, 4w. Only one unit is allowed, for -// example: 2w3d is not allowed; you should use 17d instead. -// -// If neither oldest nor timespan is specified, the default time -// interval will be (youngest - 4 hours, youngest]. -func (c *TimeseriesDescriptorsListCall) Timespan(timespan string) *TimeseriesDescriptorsListCall { - c.urlParams_.Set("timespan", timespan) - return c -} - -// Window sets the optional parameter "window": The sampling window. At -// most one data point will be returned for each window in the requested -// time interval. This parameter is only valid for non-cumulative metric -// types. Units: -// - m: minute -// - h: hour -// - d: day -// - w: week Examples: 3m, 4w. Only one unit is allowed, for example: -// 2w3d is not allowed; you should use 17d instead. -func (c *TimeseriesDescriptorsListCall) Window(window string) *TimeseriesDescriptorsListCall { - c.urlParams_.Set("window", window) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TimeseriesDescriptorsListCall) Fields(s ...googleapi.Field) *TimeseriesDescriptorsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TimeseriesDescriptorsListCall) IfNoneMatch(entityTag string) *TimeseriesDescriptorsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TimeseriesDescriptorsListCall) Context(ctx context.Context) *TimeseriesDescriptorsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TimeseriesDescriptorsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TimeseriesDescriptorsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/timeseriesDescriptors/{metric}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "metric": c.metric, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudmonitoring.timeseriesDescriptors.list" call. -// Exactly one of *ListTimeseriesDescriptorsResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ListTimeseriesDescriptorsResponse.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *TimeseriesDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListTimeseriesDescriptorsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListTimeseriesDescriptorsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "List the descriptors of the time series that match the metric and labels values and that have data points in the interval. Large responses are paginated; use the nextPageToken returned in the response to request subsequent pages of results by setting the pageToken query parameter to the value of the nextPageToken.", - // "httpMethod": "GET", - // "id": "cloudmonitoring.timeseriesDescriptors.list", - // "parameterOrder": [ - // "project", - // "metric", - // "youngest" - // ], - // "parameters": { - // "aggregator": { - // "description": "The aggregation function that will reduce the data points in each window to a single point. This parameter is only valid for non-cumulative metrics with a value type of INT64 or DOUBLE.", - // "enum": [ - // "max", - // "mean", - // "min", - // "sum" - // ], - // "enumDescriptions": [ - // "", - // "", - // "", - // "" - // ], - // "location": "query", - // "type": "string" - // }, - // "count": { - // "default": "100", - // "description": "Maximum number of time series descriptors per page. Used for pagination. 
If not specified, count = 100.", - // "format": "int32", - // "location": "query", - // "maximum": "1000", - // "minimum": "1", - // "type": "integer" - // }, - // "labels": { - // "description": "A collection of labels for the matching time series, which are represented as: \n- key==value: key equals the value \n- key=~value: key regex matches the value \n- key!=value: key does not equal the value \n- key!~value: key regex does not match the value For example, to list all of the time series descriptors for the region us-central1, you could specify:\nlabel=cloud.googleapis.com%2Flocation=~us-central1.*", - // "location": "query", - // "pattern": "(.+?)(==|=~|!=|!~)(.+)", - // "repeated": true, - // "type": "string" - // }, - // "metric": { - // "description": "Metric names are protocol-free URLs as listed in the Supported Metrics page. For example, compute.googleapis.com/instance/disk/read_ops_count.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "oldest": { - // "description": "Start of the time interval (exclusive), which is expressed as an RFC 3339 timestamp. If neither oldest nor timespan is specified, the default time interval will be (youngest - 4 hours, youngest]", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "The pagination token, which is used to page through large result sets. Set this value to the value of the nextPageToken to retrieve the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "The project ID to which this time series belongs. The value can be the numeric project ID or string-based project name.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "timespan": { - // "description": "Length of the time interval to query, which is an alternative way to declare the interval: (youngest - timespan, youngest]. The timespan and oldest parameters should not be used together. Units: \n- s: second \n- m: minute \n- h: hour \n- d: day \n- w: week Examples: 2s, 3m, 4w. Only one unit is allowed, for example: 2w3d is not allowed; you should use 17d instead.\n\nIf neither oldest nor timespan is specified, the default time interval will be (youngest - 4 hours, youngest].", - // "location": "query", - // "pattern": "[0-9]+[smhdw]?", - // "type": "string" - // }, - // "window": { - // "description": "The sampling window. At most one data point will be returned for each window in the requested time interval. This parameter is only valid for non-cumulative metric types. Units: \n- m: minute \n- h: hour \n- d: day \n- w: week Examples: 3m, 4w. Only one unit is allowed, for example: 2w3d is not allowed; you should use 17d instead.", - // "location": "query", - // "pattern": "[0-9]+[mhdw]?", - // "type": "string" - // }, - // "youngest": { - // "description": "End of the time interval (inclusive), which is expressed as an RFC 3339 timestamp.", - // "location": "query", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/timeseriesDescriptors/{metric}", - // "request": { - // "$ref": "ListTimeseriesDescriptorsRequest" - // }, - // "response": { - // "$ref": "ListTimeseriesDescriptorsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. 
-// The provided context supersedes any context provided to the Context method. -func (c *TimeseriesDescriptorsListCall) Pages(ctx context.Context, f func(*ListTimeseriesDescriptorsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json deleted file mode 100644 index d43a80f6815..00000000000 --- a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json +++ /dev/null @@ -1,23661 +0,0 @@ -{ - "kind": "discovery#restDescription", - "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/eFyZs7VljDosqiT0aKEgQhuYKu8\"", - "discoveryVersion": "v1", - "id": "compute:beta", - "name": "compute", - "version": "beta", - "revision": "20170416", - "title": "Compute Engine API", - "description": "Creates and runs virtual machines on Google Cloud Platform.", - "ownerDomain": "google.com", - "ownerName": "Google", - "icons": { - "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", - "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" - }, - "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", - "protocol": "rest", - "baseUrl": "https://www.googleapis.com/compute/beta/projects/", - "basePath": "/compute/beta/projects/", - "rootUrl": "https://www.googleapis.com/", - "servicePath": "compute/beta/projects/", - "batchPath": "batch", - "parameters": { - "alt": { - "type": "string", - "description": "Data format for the response.", - "default": "json", - "enum": [ - "json" - ], - "enumDescriptions": [ - "Responses with Content-Type of application/json" - ], - "location": "query" - }, - "fields": { - "type": "string", - "description": "Selector specifying which fields to include in a partial response.", - "location": "query" - }, - "key": { - "type": "string", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "location": "query" - }, - "oauth_token": { - "type": "string", - "description": "OAuth 2.0 token for the current user.", - "location": "query" - }, - "prettyPrint": { - "type": "boolean", - "description": "Returns response with indentations and line breaks.", - "default": "true", - "location": "query" - }, - "quotaUser": { - "type": "string", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", - "location": "query" - }, - "userIp": { - "type": "string", - "description": "IP address of the site where the request originates. 
Use this if you want to enforce per-user limits.", - "location": "query" - } - }, - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/compute": { - "description": "View and manage your Google Compute Engine resources" - }, - "https://www.googleapis.com/auth/compute.readonly": { - "description": "View your Google Compute Engine resources" - }, - "https://www.googleapis.com/auth/devstorage.full_control": { - "description": "Manage your data and permissions in Google Cloud Storage" - }, - "https://www.googleapis.com/auth/devstorage.read_only": { - "description": "View your data in Google Cloud Storage" - }, - "https://www.googleapis.com/auth/devstorage.read_write": { - "description": "Manage your data in Google Cloud Storage" - } - } - } - }, - "schemas": { - "AcceleratorConfig": { - "id": "AcceleratorConfig", - "type": "object", - "description": "A specification of the type and number of accelerator cards attached to the instance.", - "properties": { - "acceleratorCount": { - "type": "integer", - "description": "The number of the guest accelerator cards exposed to this instance.", - "format": "int32" - }, - "acceleratorType": { - "type": "string", - "description": "Full or partial URL of the accelerator type resource to expose to this instance." - } - } - }, - "AcceleratorType": { - "id": "AcceleratorType", - "type": "object", - "description": "An Accelerator Type resource.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "deprecated": { - "$ref": "DeprecationStatus", - "description": "[Output Only] The deprecation status associated with this accelerator type." - }, - "description": { - "type": "string", - "description": "[Output Only] An optional textual description of the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] The type of the resource. Always compute#acceleratorType for accelerator types.", - "default": "compute#acceleratorType" - }, - "maximumCardsPerInstance": { - "type": "integer", - "description": "[Output Only] Maximum accelerator cards allowed per instance.", - "format": "int32" - }, - "name": { - "type": "string", - "description": "[Output Only] Name of the resource.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined fully-qualified URL for this resource." - }, - "zone": { - "type": "string", - "description": "[Output Only] The name of the zone where the accelerator type resides, such as us-central1-a." - } - } - }, - "AcceleratorTypeAggregatedList": { - "id": "AcceleratorTypeAggregatedList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "object", - "description": "[Output Only] A map of scoped accelerator type lists.", - "additionalProperties": { - "$ref": "AcceleratorTypesScopedList", - "description": "[Output Only] Name of the scope containing this set of accelerator types." - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. 
Always compute#acceleratorTypeAggregatedList for aggregated lists of accelerator types.", - "default": "compute#acceleratorTypeAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "AcceleratorTypeList": { - "id": "AcceleratorTypeList", - "type": "object", - "description": "Contains a list of accelerator types.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "array", - "description": "A list of AcceleratorType resources.", - "items": { - "$ref": "AcceleratorType" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#acceleratorTypeList for lists of accelerator types.", - "default": "compute#acceleratorTypeList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] A token used to continue a truncated list request." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "AcceleratorTypesScopedList": { - "id": "AcceleratorTypesScopedList", - "type": "object", - "properties": { - "acceleratorTypes": { - "type": "array", - "description": "[Output Only] List of accelerator types contained in this scope.", - "items": { - "$ref": "AcceleratorType" - } - }, - "warning": { - "type": "object", - "description": "[Output Only] An informational warning that appears when the accelerator types list is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." - } - } - } - } - }, - "AccessConfig": { - "id": "AccessConfig", - "type": "object", - "description": "An access configuration attached to an instance's network interface. Only one access config per instance is supported.", - "properties": { - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#accessConfig for access configs.", - "default": "compute#accessConfig" - }, - "name": { - "type": "string", - "description": "The name of this access configuration. The default and recommended name is External NAT but you can use any arbitrary string you would like. For example, My external IP or Network Access." - }, - "natIP": { - "type": "string", - "description": "An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance." - }, - "type": { - "type": "string", - "description": "The type of configuration. The default and only option is ONE_TO_ONE_NAT.", - "default": "ONE_TO_ONE_NAT", - "enum": [ - "ONE_TO_ONE_NAT" - ], - "enumDescriptions": [ - "" - ] - } - } - }, - "Address": { - "id": "Address", - "type": "object", - "description": "A reserved address resource.", - "properties": { - "address": { - "type": "string", - "description": "The static external IP address represented by this resource." - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "ipVersion": { - "type": "string", - "description": "The IP Version that will be used by this address. Valid options are IPV4 or IPV6. This can only be specified for a global address.", - "enum": [ - "IPV4", - "IPV6", - "UNSPECIFIED_VERSION" - ], - "enumDescriptions": [ - "", - "", - "" - ] - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#address for addresses.", - "default": "compute#address" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.addresses.insert" - ] - } - }, - "region": { - "type": "string", - "description": "[Output Only] URL of the region where the regional address resides. This field is not applicable to global addresses." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "status": { - "type": "string", - "description": "[Output Only] The status of the address, which can be either IN_USE or RESERVED. An address that is RESERVED is currently reserved and available to use. An IN_USE address is currently being used by another resource and is not available.", - "enum": [ - "IN_USE", - "RESERVED" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "users": { - "type": "array", - "description": "[Output Only] The URLs of the resources that are using this address.", - "items": { - "type": "string" - } - } - } - }, - "AddressAggregatedList": { - "id": "AddressAggregatedList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "object", - "description": "[Output Only] A map of scoped address lists.", - "additionalProperties": { - "$ref": "AddressesScopedList", - "description": "[Output Only] Name of the scope containing this set of addresses." - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#addressAggregatedList for aggregated lists of addresses.", - "default": "compute#addressAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "AddressList": { - "id": "AddressList", - "type": "object", - "description": "Contains a list of addresses.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "[Output Only] A list of addresses.", - "items": { - "$ref": "Address" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#addressList for lists of addresses.", - "default": "compute#addressList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." 
- } - } - }, - "AddressesScopedList": { - "id": "AddressesScopedList", - "type": "object", - "properties": { - "addresses": { - "type": "array", - "description": "[Output Only] List of addresses contained in this scope.", - "items": { - "$ref": "Address" - } - }, - "warning": { - "type": "object", - "description": "[Output Only] Informational warning which replaces the list of addresses when the list is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." - } - } - } - } - }, - "AliasIpRange": { - "id": "AliasIpRange", - "type": "object", - "description": "An alias IP range attached to an instance's network interface.", - "properties": { - "ipCidrRange": { - "type": "string", - "description": "The IP CIDR range represented by this alias IP range. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. This range may be a single IP address (e.g. 10.2.3.4), a netmask (e.g. /24) or a CIDR format string (e.g. 10.1.2.0/24)." - }, - "subnetworkRangeName": { - "type": "string", - "description": "Optional subnetwork secondary range name specifying the secondary range from which to allocate the IP CIDR range for this alias IP range. If left unspecified, the primary range of the subnetwork will be used." - } - } - }, - "AttachedDisk": { - "id": "AttachedDisk", - "type": "object", - "description": "An instance-attached disk resource.", - "properties": { - "autoDelete": { - "type": "boolean", - "description": "Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance)." 
- }, - "boot": { - "type": "boolean", - "description": "Indicates that this is a boot disk. The virtual machine will use the first partition of the disk for its root filesystem." - }, - "deviceName": { - "type": "string", - "description": "Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance.\n\nIf not specified, the server chooses a default device name to apply to this disk, in the form persistent-disks-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks." - }, - "diskEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "Encrypts or decrypts a disk using a customer-supplied encryption key.\n\nIf you are creating a new disk, this field encrypts the new disk using an encryption key that you provide. If you are attaching an existing disk that is already encrypted, this field decrypts the disk using the customer-supplied encryption key.\n\nIf you encrypt a disk using a customer-supplied key, you must provide the same key again when you attempt to use this resource at a later time. For example, you must provide the key when you create a snapshot or an image from the disk or when you attach the disk to a virtual machine instance.\n\nIf you do not provide an encryption key, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later.\n\nInstance templates do not store customer-supplied encryption keys, so you cannot use your own keys to encrypt disks in a managed instance group." - }, - "index": { - "type": "integer", - "description": "Assigns a zero-based index to this disk, where 0 is reserved for the boot disk. For example, if you have many disks attached to an instance, each disk would have a unique index number. If not specified, the server will choose an appropriate value.", - "format": "int32" - }, - "initializeParams": { - "$ref": "AttachedDiskInitializeParams", - "description": "[Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance.\n\nThis property is mutually exclusive with the source property; you can only define one or the other, but not both." - }, - "interface": { - "type": "string", - "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance.", - "enum": [ - "NVME", - "SCSI" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#attachedDisk for attached disks.", - "default": "compute#attachedDisk" - }, - "licenses": { - "type": "array", - "description": "[Output Only] Any valid publicly visible licenses.", - "items": { - "type": "string" - } - }, - "mode": { - "type": "string", - "description": "The mode in which to attach this disk, either READ_WRITE or READ_ONLY. 
If not specified, the default is to attach the disk in READ_WRITE mode.", - "enum": [ - "READ_ONLY", - "READ_WRITE" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "source": { - "type": "string", - "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or disks.source is required.\n\nIf desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks.\n\nNote that for InstanceTemplate, specify the disk name, not the URL for the disk." - }, - "type": { - "type": "string", - "description": "Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT.", - "enum": [ - "PERSISTENT", - "SCRATCH" - ], - "enumDescriptions": [ - "", - "" - ] - } - } - }, - "AttachedDiskInitializeParams": { - "id": "AttachedDiskInitializeParams", - "type": "object", - "description": "[Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance.\n\nThis property is mutually exclusive with the source property; you can only define one or the other, but not both.", - "properties": { - "diskName": { - "type": "string", - "description": "Specifies the disk name. If not specified, the default is to use the name of the instance." - }, - "diskSizeGb": { - "type": "string", - "description": "Specifies the size of the disk in base-2 GB.", - "format": "int64" - }, - "diskStorageType": { - "type": "string", - "description": "[Deprecated] Storage type of the disk.", - "enum": [ - "HDD", - "SSD" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "diskType": { - "type": "string", - "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example:\n\nhttps://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/pd-standard \n\nOther values include pd-ssd and local-ssd. If you define this field, you can provide either the full or partial URL. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/diskType \n- projects/project/zones/zone/diskTypes/diskType \n- zones/zone/diskTypes/diskType Note that for InstanceTemplate, this is the name of the disk type, not URL." - }, - "sourceImage": { - "type": "string", - "description": "The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or disks.source is required.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a private image that you created, specify the image name in the following format:\n\nglobal/images/my-private-image \n\nYou can also specify a private image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\n\nglobal/images/family/my-private-family \n\nIf the source image is deleted later, this field will not be set." 
- }, - "sourceImageEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key.\n\nInstance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys." - } - } - }, - "AuditConfig": { - "id": "AuditConfig", - "type": "object", - "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n{ \"audit_configs\": [ { \"service\": \"allServices\" \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:foo@gmail.com\" ] }, { \"log_type\": \"DATA_WRITE\", }, { \"log_type\": \"ADMIN_READ\", } ] }, { \"service\": \"fooservice.googleapis.com\" \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:bar@gmail.com\" ] } ] } ] }\n\nFor fooservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts foo@gmail.com from DATA_READ logging, and bar@gmail.com from DATA_WRITE logging.", - "properties": { - "auditLogConfigs": { - "type": "array", - "description": "The configuration for logging of each type of permission.", - "items": { - "$ref": "AuditLogConfig" - } - }, - "exemptedMembers": { - "type": "array", - "description": "", - "items": { - "type": "string" - } - }, - "service": { - "type": "string", - "description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services." - } - } - }, - "AuditLogConfig": { - "id": "AuditLogConfig", - "type": "object", - "description": "Provides the configuration for logging a type of permissions. Example:\n\n{ \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:foo@gmail.com\" ] }, { \"log_type\": \"DATA_WRITE\", } ] }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting foo@gmail.com from DATA_READ logging.", - "properties": { - "exemptedMembers": { - "type": "array", - "description": "Specifies the identities that do not cause logging for this type of permission. Follows the same format of [Binding.members][].", - "items": { - "type": "string" - } - }, - "logType": { - "type": "string", - "description": "The log type that this config enables.", - "enum": [ - "ADMIN_READ", - "DATA_READ", - "DATA_WRITE", - "LOG_TYPE_UNSPECIFIED" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ] - } - } - }, - "Autoscaler": { - "id": "Autoscaler", - "type": "object", - "description": "Represents an Autoscaler resource. Autoscalers allow you to automatically scale virtual machine instances in managed instance groups according to an autoscaling policy that you define. 
For more information, read Autoscaling Groups of Instances.", - "properties": { - "autoscalingPolicy": { - "$ref": "AutoscalingPolicy", - "description": "The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization.\n\nIf none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%." - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#autoscaler for autoscalers.", - "default": "compute#autoscaler" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.instanceGroups.insert" - ] - } - }, - "region": { - "type": "string", - "description": "[Output Only] URL of the region where the instance group resides (for autoscalers living in regional scope)." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "status": { - "type": "string", - "description": "[Output Only] The status of the autoscaler configuration.", - "enum": [ - "ACTIVE", - "DELETING", - "ERROR", - "PENDING" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ] - }, - "statusDetails": { - "type": "array", - "description": "[Output Only] Human-readable details about the current state of the autoscaler. Read the documentation for Commonly returned status messages for examples of status messages you might encounter.", - "items": { - "$ref": "AutoscalerStatusDetails" - } - }, - "target": { - "type": "string", - "description": "URL of the managed instance group that this autoscaler will scale." - }, - "zone": { - "type": "string", - "description": "[Output Only] URL of the zone where the instance group resides (for autoscalers living in zonal scope)." - } - } - }, - "AutoscalerAggregatedList": { - "id": "AutoscalerAggregatedList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "object", - "description": "A map of scoped autoscaler lists.", - "additionalProperties": { - "$ref": "AutoscalersScopedList", - "description": "[Output Only] Name of the scope containing this set of autoscalers." - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. 
Always compute#autoscalerAggregatedList for aggregated lists of autoscalers.", - "default": "compute#autoscalerAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "AutoscalerList": { - "id": "AutoscalerList", - "type": "object", - "description": "Contains a list of Autoscaler resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "A list of Autoscaler resources.", - "items": { - "$ref": "Autoscaler" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#autoscalerList for lists of autoscalers.", - "default": "compute#autoscalerList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "AutoscalerStatusDetails": { - "id": "AutoscalerStatusDetails", - "type": "object", - "properties": { - "message": { - "type": "string", - "description": "The status message." - }, - "type": { - "type": "string", - "description": "The type of error returned.", - "enum": [ - "ALL_INSTANCES_UNHEALTHY", - "BACKEND_SERVICE_DOES_NOT_EXIST", - "CAPPED_AT_MAX_NUM_REPLICAS", - "CUSTOM_METRIC_DATA_POINTS_TOO_SPARSE", - "CUSTOM_METRIC_INVALID", - "MIN_EQUALS_MAX", - "MISSING_CUSTOM_METRIC_DATA_POINTS", - "MISSING_LOAD_BALANCING_DATA_POINTS", - "MORE_THAN_ONE_BACKEND_SERVICE", - "NOT_ENOUGH_QUOTA_AVAILABLE", - "SCALING_TARGET_DOES_NOT_EXIST", - "UNKNOWN", - "UNSUPPORTED_MAX_RATE_LOAD_BALANCING_CONFIGURATION" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - } - } - }, - "AutoscalersScopedList": { - "id": "AutoscalersScopedList", - "type": "object", - "properties": { - "autoscalers": { - "type": "array", - "description": "[Output Only] List of autoscalers contained in this scope.", - "items": { - "$ref": "Autoscaler" - } - }, - "warning": { - "type": "object", - "description": "[Output Only] Informational warning which replaces the list of autoscalers when the list is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." - } - } - } - } - }, - "AutoscalingPolicy": { - "id": "AutoscalingPolicy", - "type": "object", - "description": "Cloud Autoscaler policy.", - "properties": { - "coolDownPeriodSec": { - "type": "integer", - "description": "The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds.\n\nVirtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.", - "format": "int32" - }, - "cpuUtilization": { - "$ref": "AutoscalingPolicyCpuUtilization", - "description": "Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group." - }, - "customMetricUtilizations": { - "type": "array", - "description": "Configuration parameters of autoscaling based on a custom metric.", - "items": { - "$ref": "AutoscalingPolicyCustomMetricUtilization" - } - }, - "loadBalancingUtilization": { - "$ref": "AutoscalingPolicyLoadBalancingUtilization", - "description": "Configuration parameters of autoscaling based on load balancer." - }, - "maxNumReplicas": { - "type": "integer", - "description": "The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. 
The maximum number of replicas should not be lower than minimal number of replicas.", - "format": "int32" - }, - "minNumReplicas": { - "type": "integer", - "description": "The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, autoscaler will choose a default value depending on maximum number of instances allowed.", - "format": "int32" - } - } - }, - "AutoscalingPolicyCpuUtilization": { - "id": "AutoscalingPolicyCpuUtilization", - "type": "object", - "description": "CPU utilization policy.", - "properties": { - "utilizationTarget": { - "type": "number", - "description": "The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.6.\n\nIf the CPU level is below the target utilization, the autoscaler scales down the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization.\n\nIf the average CPU is above the target utilization, the autoscaler scales up until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.", - "format": "double" - } - } - }, - "AutoscalingPolicyCustomMetricUtilization": { - "id": "AutoscalingPolicyCustomMetricUtilization", - "type": "object", - "description": "Custom utilization metric policy.", - "properties": { - "metric": { - "type": "string", - "description": "The identifier of the Stackdriver Monitoring metric. The metric cannot have negative values and should be a utilization metric, which means that the number of virtual machines handling requests should increase or decrease proportionally to the metric. The metric must also have a label of compute.googleapis.com/resource_id with the value of the instance's unique ID, although this alone does not guarantee that the metric is valid.\n\nFor example, the following is a valid metric:\ncompute.googleapis.com/instance/network/received_bytes_count\nThe following is not a valid metric because it does not increase or decrease based on usage:\ncompute.googleapis.com/instance/cpu/reserved_cores" - }, - "utilizationTarget": { - "type": "number", - "description": "Target value of the metric which autoscaler should maintain. Must be a positive value.", - "format": "double" - }, - "utilizationTargetType": { - "type": "string", - "description": "Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Either GAUGE, DELTA_PER_SECOND, or DELTA_PER_MINUTE. If not specified, the default is GAUGE.", - "enum": [ - "DELTA_PER_MINUTE", - "DELTA_PER_SECOND", - "GAUGE" - ], - "enumDescriptions": [ - "", - "", - "" - ] - } - } - }, - "AutoscalingPolicyLoadBalancingUtilization": { - "id": "AutoscalingPolicyLoadBalancingUtilization", - "type": "object", - "description": "Configuration parameters of autoscaling based on load balancing.", - "properties": { - "utilizationTarget": { - "type": "number", - "description": "Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.", - "format": "double" - } - } - }, - "Backend": { - "id": "Backend", - "type": "object", - "description": "Message containing information of one individual backend.", - "properties": { - "balancingMode": { - "type": "string", - "description": "Specifies the balancing mode for this backend. 
For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION. Valid values are UTILIZATION, RATE (for HTTP(S)) and CONNECTION (for TCP/SSL).\n\nThis cannot be used for internal load balancing.", - "enum": [ - "CONNECTION", - "RATE", - "UTILIZATION" - ], - "enumDescriptions": [ - "", - "", - "" - ] - }, - "capacityScaler": { - "type": "number", - "description": "A multiplier applied to the group's maximum servicing capacity (based on UTILIZATION, RATE or CONNECTION). Default value is 1, which means the group will serve up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available Capacity. Valid range is [0.0,1.0].\n\nThis cannot be used for internal load balancing.", - "format": "float" - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "group": { - "type": "string", - "description": "The fully-qualified URL of a zonal Instance Group resource. This instance group defines the list of instances that serve traffic. Member virtual machine instances from each instance group must live in the same zone as the instance group itself. No two backends in a backend service are allowed to use same Instance Group resource.\n\nNote that you must specify an Instance Group resource using the fully-qualified URL, rather than a partial URL.\n\nWhen the BackendService has load balancing scheme INTERNAL, the instance group must be in a zone within the same region as the BackendService." - }, - "maxConnections": { - "type": "integer", - "description": "The max number of simultaneous connections for the group. Can be used with either CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must be set.\n\nThis cannot be used for internal load balancing.", - "format": "int32" - }, - "maxConnectionsPerInstance": { - "type": "integer", - "description": "The max number of simultaneous connections that a single backend instance can handle. This is used to calculate the capacity of the group. Can be used in either CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must be set.\n\nThis cannot be used for internal load balancing.", - "format": "int32" - }, - "maxRate": { - "type": "integer", - "description": "The max requests per second (RPS) of the group. Can be used with either RATE or UTILIZATION balancing modes, but required if RATE mode. For RATE mode, either maxRate or maxRatePerInstance must be set.\n\nThis cannot be used for internal load balancing.", - "format": "int32" - }, - "maxRatePerInstance": { - "type": "number", - "description": "The max requests per second (RPS) that a single backend instance can handle. This is used to calculate the capacity of the group. Can be used in either balancing mode. For RATE mode, either maxRate or maxRatePerInstance must be set.\n\nThis cannot be used for internal load balancing.", - "format": "float" - }, - "maxUtilization": { - "type": "number", - "description": "Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization target for the group. The default is 0.8. Valid range is [0.0, 1.0].\n\nThis cannot be used for internal load balancing.", - "format": "float" - } - } - }, - "BackendBucket": { - "id": "BackendBucket", - "type": "object", - "description": "A BackendBucket resource. 
This resource defines a Cloud Storage bucket.", - "properties": { - "bucketName": { - "type": "string", - "description": "Cloud Storage bucket name." - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional textual description of the resource; provided by the client when the resource is created." - }, - "enableCdn": { - "type": "boolean", - "description": "If true, enable Cloud CDN for this BackendBucket." - }, - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "Type of the resource.", - "default": "compute#backendBucket" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - } - } - }, - "BackendBucketList": { - "id": "BackendBucketList", - "type": "object", - "description": "Contains a list of BackendBucket resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "array", - "description": "A list of BackendBucket resources.", - "items": { - "$ref": "BackendBucket" - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#backendBucketList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] A token used to continue a truncated list request." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "BackendService": { - "id": "BackendService", - "type": "object", - "description": "A BackendService resource. This resource defines a group of backend virtual machines and their serving capacity.", - "properties": { - "affinityCookieTtlSec": { - "type": "integer", - "description": "Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value for TTL is one day.\n\nWhen the load balancing scheme is INTERNAL, this field is not used.", - "format": "int32" - }, - "backends": { - "type": "array", - "description": "The list of backends that serve this BackendService.", - "items": { - "$ref": "Backend" - } - }, - "cdnPolicy": { - "$ref": "BackendServiceCdnPolicy", - "description": "Cloud CDN configuration for this BackendService." - }, - "connectionDraining": { - "$ref": "ConnectionDraining" - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." 
- }, - "enableCDN": { - "type": "boolean", - "description": "If true, enable Cloud CDN for this BackendService.\n\nWhen the load balancing scheme is INTERNAL, this field is not used." - }, - "fingerprint": { - "type": "string", - "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a BackendService. An up-to-date fingerprint must be provided in order to update the BackendService.", - "format": "byte" - }, - "healthChecks": { - "type": "array", - "description": "The list of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health checking this BackendService. Currently at most one health check can be specified, and a health check is required.\n\nFor internal load balancing, a URL to a HealthCheck resource must be specified instead.", - "items": { - "type": "string" - } - }, - "iap": { - "$ref": "BackendServiceIAP" - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#backendService for backend services.", - "default": "compute#backendService" - }, - "loadBalancingScheme": { - "type": "string", - "enum": [ - "EXTERNAL", - "INTERNAL", - "INVALID_LOAD_BALANCING_SCHEME" - ], - "enumDescriptions": [ - "", - "", - "" - ] - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "port": { - "type": "integer", - "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80.\n\nThis cannot be used for internal load balancing.", - "format": "int32" - }, - "portName": { - "type": "string", - "description": "Name of backend port. The same name should appear in the instance groups referenced by this service. Required when the load balancing scheme is EXTERNAL.\n\nWhen the load balancing scheme is INTERNAL, this field is not used." - }, - "protocol": { - "type": "string", - "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP, HTTPS, TCP, and SSL. The default is HTTP.\n\nFor internal load balancing, the possible values are TCP and UDP, and the default is TCP.", - "enum": [ - "HTTP", - "HTTPS", - "SSL", - "TCP", - "UDP" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ] - }, - "region": { - "type": "string", - "description": "[Output Only] URL of the region where the regional backend service resides. This field is not applicable to global backend services." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "sessionAffinity": { - "type": "string", - "description": "Type of session affinity to use. 
The default is NONE.\n\nWhen the load balancing scheme is EXTERNAL, can be NONE, CLIENT_IP, or GENERATED_COOKIE.\n\nWhen the load balancing scheme is INTERNAL, can be NONE, CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO.\n\nWhen the protocol is UDP, this field is not used.", - "enum": [ - "CLIENT_IP", - "CLIENT_IP_PORT_PROTO", - "CLIENT_IP_PROTO", - "GENERATED_COOKIE", - "NONE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ] - }, - "timeoutSec": { - "type": "integer", - "description": "How many seconds to wait for the backend before considering it a failed request. Default is 30 seconds.", - "format": "int32" - } - } - }, - "BackendServiceAggregatedList": { - "id": "BackendServiceAggregatedList", - "type": "object", - "description": "Contains a list of BackendServicesScopedList.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "object", - "description": "A map of scoped BackendService lists.", - "additionalProperties": { - "$ref": "BackendServicesScopedList", - "description": "Name of the scope containing this set of BackendServices." - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#backendServiceAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] A token used to continue a truncated list request." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "BackendServiceCdnPolicy": { - "id": "BackendServiceCdnPolicy", - "type": "object", - "description": "Message containing Cloud CDN configuration for a backend service.", - "properties": { - "cacheKeyPolicy": { - "$ref": "CacheKeyPolicy", - "description": "The CacheKeyPolicy for this CdnPolicy." - } - } - }, - "BackendServiceGroupHealth": { - "id": "BackendServiceGroupHealth", - "type": "object", - "properties": { - "healthStatus": { - "type": "array", - "items": { - "$ref": "HealthStatus" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#backendServiceGroupHealth for the health of backend services.", - "default": "compute#backendServiceGroupHealth" - } - } - }, - "BackendServiceIAP": { - "id": "BackendServiceIAP", - "type": "object", - "description": "Identity-Aware Proxy", - "properties": { - "enabled": { - "type": "boolean" - }, - "oauth2ClientId": { - "type": "string" - }, - "oauth2ClientSecret": { - "type": "string" - }, - "oauth2ClientSecretSha256": { - "type": "string" - } - } - }, - "BackendServiceList": { - "id": "BackendServiceList", - "type": "object", - "description": "Contains a list of BackendService resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "array", - "description": "A list of BackendService resources.", - "items": { - "$ref": "BackendService" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#backendServiceList for lists of backend services.", - "default": "compute#backendServiceList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "BackendServicesScopedList": { - "id": "BackendServicesScopedList", - "type": "object", - "properties": { - "backendServices": { - "type": "array", - "description": "List of BackendServices contained in this scope.", - "items": { - "$ref": "BackendService" - } - }, - "warning": { - "type": "object", - "description": "Informational warning which replaces the list of backend services when the list is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." - } - } - } - } - }, - "Binding": { - "id": "Binding", - "type": "object", - "description": "Associates `members` with a `role`.", - "properties": { - "members": { - "type": "array", - "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group. 
For example, `admins@example.com`.\n\n* `domain:{domain}`: A Google Apps domain name that represents all the users of that domain. For example, `google.com` or `example.com`.", - "items": { - "type": "string" - } - }, - "role": { - "type": "string", - "description": "Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`." - } - } - }, - "CacheInvalidationRule": { - "id": "CacheInvalidationRule", - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "If set, this invalidation rule will only apply to requests with a Host header matching host." - }, - "path": { - "type": "string" - } - } - }, - "CacheKeyPolicy": { - "id": "CacheKeyPolicy", - "type": "object", - "description": "Message containing what to include in the cache key for a request for Cloud CDN.", - "properties": { - "includeHost": { - "type": "boolean", - "description": "If true, requests to different hosts will be cached separately." - }, - "includeProtocol": { - "type": "boolean", - "description": "If true, http and https requests will be cached separately." - }, - "includeQueryString": { - "type": "boolean", - "description": "If true, include query string parameters in the cache key according to query_string_whitelist and query_string_blacklist. If neither is set, the entire query string will be included. If false, the query string will be excluded from the cache key entirely." - }, - "queryStringBlacklist": { - "type": "array", - "description": "Names of query string parameters to exclude in cache keys. All other parameters will be included. Either specify query_string_whitelist or query_string_blacklist, not both. '&' and '=' will be percent encoded and not treated as delimiters.", - "items": { - "type": "string" - } - }, - "queryStringWhitelist": { - "type": "array", - "description": "Names of query string parameters to include in cache keys. All other parameters will be excluded. Either specify query_string_whitelist or query_string_blacklist, not both. '&' and '=' will be percent encoded and not treated as delimiters.", - "items": { - "type": "string" - } - } - } - }, - "Commitment": { - "id": "Commitment", - "type": "object", - "description": "A usage-commitment with a start / end time. Users create commitments for particular resources (e.g. memory). Actual usage is first deducted from available commitments made prior, perhaps at a reduced price (as laid out in the commitment).", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "endTimestamp": { - "type": "string", - "description": "[Output Only] Commitment end time in RFC3339 text format." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#commitment for commitments.", - "default": "compute#commitment" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "plan": { - "type": "string", - "description": "The plan for this commitment, which determines duration and discount rate. The currently supported plans are TWELVE_MONTH (1 year), and THIRTY_SIX_MONTH (3 years).", - "enum": [ - "INVALID", - "THIRTY_SIX_MONTH", - "TWELVE_MONTH" - ], - "enumDescriptions": [ - "", - "", - "" - ] - }, - "region": { - "type": "string", - "description": "[Output Only] URL of the region where this commitment may be used." - }, - "resources": { - "type": "array", - "description": "List of commitment amounts for particular resources. Note that VCPU and MEMORY resource commitments must occur together.", - "items": { - "$ref": "ResourceCommitment" - } - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "startTimestamp": { - "type": "string", - "description": "[Output Only] Commitment start time in RFC3339 text format." - }, - "status": { - "type": "string", - "description": "[Output Only] Status of the commitment with regards to eventual expiration (each commitment has an end-date defined). One of the following values: NOT_YET_ACTIVE, ACTIVE, EXPIRED.", - "enum": [ - "ACTIVE", - "CREATING", - "EXPIRED", - "NOT_YET_ACTIVE" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ] - }, - "statusMessage": { - "type": "string", - "description": "[Output Only] An optional, human-readable explanation of the status." - } - } - }, - "CommitmentAggregatedList": { - "id": "CommitmentAggregatedList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "object", - "description": "Commitments by scope.", - "additionalProperties": { - "$ref": "CommitmentsScopedList", - "description": "[Output Only] Name of the scope containing this set of commitments." - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#commitmentAggregatedList for aggregated lists of commitments.", - "default": "compute#commitmentAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "CommitmentList": { - "id": "CommitmentList", - "type": "object", - "description": "Contains a list of Commitment resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "A list of Commitment resources.", - "items": { - "$ref": "Commitment" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. 
Always compute#commitmentList for lists of commitments.", - "default": "compute#commitmentList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "CommitmentsScopedList": { - "id": "CommitmentsScopedList", - "type": "object", - "properties": { - "commitments": { - "type": "array", - "description": "[Output Only] List of commitments contained in this scope.", - "items": { - "$ref": "Commitment" - } - }, - "warning": { - "type": "object", - "description": "[Output Only] Informational warning which replaces the list of commitments when the list is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." 
- } - } - } - } - }, - "Condition": { - "id": "Condition", - "type": "object", - "description": "A condition to be met.", - "properties": { - "iam": { - "type": "string", - "description": "Trusted attributes supplied by the IAM system.", - "enum": [ - "APPROVER", - "ATTRIBUTION", - "AUTHORITY", - "JUSTIFICATION_TYPE", - "NO_ATTR", - "SECURITY_REALM" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "" - ] - }, - "op": { - "type": "string", - "description": "An operator to apply the subject with.", - "enum": [ - "DISCHARGED", - "EQUALS", - "IN", - "NOT_EQUALS", - "NOT_IN", - "NO_OP" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "" - ] - }, - "svc": { - "type": "string", - "description": "Trusted attributes discharged by the service." - }, - "sys": { - "type": "string", - "description": "Trusted attributes supplied by any service that owns resources and uses the IAM system for access control.", - "enum": [ - "IP", - "NAME", - "NO_ATTR", - "REGION", - "SERVICE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ] - }, - "value": { - "type": "string", - "description": "DEPRECATED. Use 'values' instead." - }, - "values": { - "type": "array", - "description": "The objects of the condition. This is mutually exclusive with 'value'.", - "items": { - "type": "string" - } - } - } - }, - "ConnectionDraining": { - "id": "ConnectionDraining", - "type": "object", - "description": "Message containing connection draining configuration.", - "properties": { - "drainingTimeoutSec": { - "type": "integer", - "description": "Time for which instance will be drained (not accept new connections, but still work to finish started).", - "format": "int32" - } - } - }, - "CustomerEncryptionKey": { - "id": "CustomerEncryptionKey", - "type": "object", - "description": "Represents a customer-supplied encryption key", - "properties": { - "rawKey": { - "type": "string", - "description": "Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource." - }, - "rsaEncryptedKey": { - "type": "string", - "description": "Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource.\n\nThe key must meet the following requirements before you can provide it to Compute Engine: \n- The key is wrapped using a RSA public key certificate provided by Google. \n- After being wrapped, the key must be encoded in RFC 4648 base64 encoding. Get the RSA public key certificate provided by Google at:\nhttps://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem" - }, - "sha256": { - "type": "string", - "description": "[Output only] The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource." - } - } - }, - "CustomerEncryptionKeyProtectedDisk": { - "id": "CustomerEncryptionKeyProtectedDisk", - "type": "object", - "properties": { - "diskEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "Decrypts data associated with the disk with a customer-supplied encryption key." - }, - "source": { - "type": "string", - "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. This field is only applicable for persistent disks." 
- } - } - }, - "DeprecationStatus": { - "id": "DeprecationStatus", - "type": "object", - "description": "Deprecation status for a public resource.", - "properties": { - "deleted": { - "type": "string", - "description": "An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DELETED. This is only informational and the status will not change unless the client explicitly changes it." - }, - "deprecated": { - "type": "string", - "description": "An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DEPRECATED. This is only informational and the status will not change unless the client explicitly changes it." - }, - "obsolete": { - "type": "string", - "description": "An optional RFC3339 timestamp on or after which the state of this resource is intended to change to OBSOLETE. This is only informational and the status will not change unless the client explicitly changes it." - }, - "replacement": { - "type": "string", - "description": "The URL of the suggested replacement for a deprecated resource. The suggested replacement resource must be the same kind of resource as the deprecated resource." - }, - "state": { - "type": "string", - "description": "The deprecation state of this resource. This can be DEPRECATED, OBSOLETE, or DELETED. Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. Operations which use OBSOLETE or DELETED resources will be rejected and result in an error.", - "enum": [ - "DELETED", - "DEPRECATED", - "OBSOLETE" - ], - "enumDescriptions": [ - "", - "", - "" - ] - } - } - }, - "Disk": { - "id": "Disk", - "type": "object", - "description": "A Disk resource.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "diskEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "Encrypts the disk using a customer-supplied encryption key.\n\nAfter you encrypt a disk with a customer-supplied key, you must provide the same key if you use the disk later (e.g. to create a disk snapshot or an image, or to attach the disk to a virtual machine).\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the disk, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#disk for disks.", - "default": "compute#disk" - }, - "labelFingerprint": { - "type": "string", - "description": "A fingerprint for the labels being applied to this disk, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. 
You must always provide an up-to-date fingerprint hash in order to update or change labels.\n\nTo see the latest fingerprint, make a get() request to retrieve a disk.", - "format": "byte" - }, - "labels": { - "type": "object", - "description": "Labels to apply to this disk. These can be later modified by the setLabels method.", - "additionalProperties": { - "type": "string" - } - }, - "lastAttachTimestamp": { - "type": "string", - "description": "[Output Only] Last attach timestamp in RFC3339 text format." - }, - "lastDetachTimestamp": { - "type": "string", - "description": "[Output Only] Last detach timestamp in RFC3339 text format." - }, - "licenses": { - "type": "array", - "description": "Any applicable publicly visible licenses.", - "items": { - "type": "string" - } - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.disks.insert" - ] - } - }, - "options": { - "type": "string", - "description": "Internal use only." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined fully-qualified URL for this resource." - }, - "sizeGb": { - "type": "string", - "description": "Size of the persistent disk, specified in GB. You can specify this field when creating a persistent disk using the sourceImage or sourceSnapshot parameter, or specify it alone to create an empty persistent disk.\n\nIf you specify this field along with sourceImage or sourceSnapshot, the value of sizeGb must not be less than the size of the sourceImage or the size of the snapshot.", - "format": "int64" - }, - "sourceImage": { - "type": "string", - "description": "The source image used to create this disk. If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a private image that you created, specify the image name in the following format:\n\nglobal/images/my-private-image \n\nYou can also specify a private image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\n\nglobal/images/family/my-private-family" - }, - "sourceImageEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key." - }, - "sourceImageId": { - "type": "string", - "description": "[Output Only] The ID value of the image used to create this disk. This value identifies the exact image that was used to create this persistent disk. 
For example, if you created the persistent disk from an image that was later deleted and recreated under the same name, the source image ID would identify the exact version of the image that was used." - }, - "sourceSnapshot": { - "type": "string", - "description": "The source snapshot used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot \n- projects/project/global/snapshots/snapshot \n- global/snapshots/snapshot" - }, - "sourceSnapshotEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "The customer-supplied encryption key of the source snapshot. Required if the source snapshot is protected by a customer-supplied encryption key." - }, - "sourceSnapshotId": { - "type": "string", - "description": "[Output Only] The unique ID of the snapshot used to create this disk. This value identifies the exact snapshot that was used to create this persistent disk. For example, if you created the persistent disk from a snapshot that was later deleted and recreated under the same name, the source snapshot ID would identify the exact version of the snapshot that was used." - }, - "status": { - "type": "string", - "description": "[Output Only] The status of disk creation.", - "enum": [ - "CREATING", - "FAILED", - "READY", - "RESTORING" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ] - }, - "storageType": { - "type": "string", - "description": "[Deprecated] Storage type of the persistent disk.", - "enum": [ - "HDD", - "SSD" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "type": { - "type": "string", - "description": "URL of the disk type resource describing which disk type to use to create the disk. Provide this when creating the disk." - }, - "users": { - "type": "array", - "description": "[Output Only] Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance", - "items": { - "type": "string" - } - }, - "zone": { - "type": "string", - "description": "[Output Only] URL of the zone where the disk resides." - } - } - }, - "DiskAggregatedList": { - "id": "DiskAggregatedList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "object", - "description": "[Output Only] A map of scoped disk lists.", - "additionalProperties": { - "$ref": "DisksScopedList", - "description": "[Output Only] Name of the scope containing this set of disks." - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#diskAggregatedList for aggregated lists of persistent disks.", - "default": "compute#diskAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results. Acceptable values are 0 to 500, inclusive. (Default: 500)" - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." 
- } - } - }, - "DiskList": { - "id": "DiskList", - "type": "object", - "description": "A list of Disk resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "array", - "description": "A list of Disk resources.", - "items": { - "$ref": "Disk" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#diskList for lists of disks.", - "default": "compute#diskList" - }, - "nextPageToken": { - "type": "string", - "description": "This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "DiskMoveRequest": { - "id": "DiskMoveRequest", - "type": "object", - "properties": { - "destinationZone": { - "type": "string", - "description": "The URL of the destination zone to move the disk. This can be a full or partial URL. For example, the following are all valid URLs to a zone: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone \n- projects/project/zones/zone \n- zones/zone" - }, - "targetDisk": { - "type": "string", - "description": "The URL of the target disk to move. This can be a full or partial URL. For example, the following are all valid URLs to a disk: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk \n- projects/project/zones/zone/disks/disk \n- zones/zone/disks/disk" - } - } - }, - "DiskType": { - "id": "DiskType", - "type": "object", - "description": "A DiskType resource.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "defaultDiskSizeGb": { - "type": "string", - "description": "[Output Only] Server-defined default disk size in GB.", - "format": "int64" - }, - "deprecated": { - "$ref": "DeprecationStatus", - "description": "[Output Only] The deprecation status associated with this disk type." - }, - "description": { - "type": "string", - "description": "[Output Only] An optional description of this resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#diskType for disk types.", - "default": "compute#diskType" - }, - "name": { - "type": "string", - "description": "[Output Only] Name of the resource.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "validDiskSize": { - "type": "string", - "description": "[Output Only] An optional textual description of the valid disk size, such as \"10GB-10TB\"." - }, - "zone": { - "type": "string", - "description": "[Output Only] URL of the zone where the disk type resides." - } - } - }, - "DiskTypeAggregatedList": { - "id": "DiskTypeAggregatedList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server." - }, - "items": { - "type": "object", - "description": "[Output Only] A map of scoped disk type lists.", - "additionalProperties": { - "$ref": "DiskTypesScopedList", - "description": "[Output Only] Name of the scope containing this set of disk types." - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#diskTypeAggregatedList.", - "default": "compute#diskTypeAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "DiskTypeList": { - "id": "DiskTypeList", - "type": "object", - "description": "Contains a list of disk types.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "[Output Only] A list of Disk Type resources.", - "items": { - "$ref": "DiskType" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#diskTypeList for disk types.", - "default": "compute#diskTypeList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "DiskTypesScopedList": { - "id": "DiskTypesScopedList", - "type": "object", - "properties": { - "diskTypes": { - "type": "array", - "description": "[Output Only] List of disk types contained in this scope.", - "items": { - "$ref": "DiskType" - } - }, - "warning": { - "type": "object", - "description": "[Output Only] Informational warning which replaces the list of disk types when the list is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." - } - } - } - } - }, - "DisksResizeRequest": { - "id": "DisksResizeRequest", - "type": "object", - "properties": { - "sizeGb": { - "type": "string", - "description": "The new size of the persistent disk, which is specified in GB.", - "format": "int64" - } - } - }, - "DisksScopedList": { - "id": "DisksScopedList", - "type": "object", - "properties": { - "disks": { - "type": "array", - "description": "[Output Only] List of disks contained in this scope.", - "items": { - "$ref": "Disk" - } - }, - "warning": { - "type": "object", - "description": "[Output Only] Informational warning which replaces the list of disks when the list is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." 
- } - } - } - } - }, - "Firewall": { - "id": "Firewall", - "type": "object", - "description": "Represents a Firewall resource.", - "properties": { - "allowed": { - "type": "array", - "description": "The list of ALLOW rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection.", - "items": { - "type": "object", - "properties": { - "IPProtocol": { - "type": "string", - "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, sctp), or the IP protocol number." - }, - "ports": { - "type": "array", - "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", - "items": { - "type": "string" - } - } - } - } - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "denied": { - "type": "array", - "description": "The list of DENY rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection.", - "items": { - "type": "object", - "properties": { - "IPProtocol": { - "type": "string", - "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, sctp), or the IP protocol number." - }, - "ports": { - "type": "array", - "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", - "items": { - "type": "string" - } - } - } - } - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "destinationRanges": { - "type": "array", - "description": "If destination ranges are specified, the firewall will apply only to traffic that has destination IP address in these ranges. These ranges must be expressed in CIDR format. Only IPv4 is supported.", - "items": { - "type": "string" - } - }, - "direction": { - "type": "string", - "description": "Direction of traffic to which this firewall applies; default is INGRESS. Note: For INGRESS traffic, it is NOT supported to specify destinationRanges; For EGRESS traffic, it is NOT supported to specify sourceRanges OR sourceTags.", - "enum": [ - "EGRESS", - "INGRESS" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Ony] Type of the resource. Always compute#firewall for firewall rules.", - "default": "compute#firewall" - }, - "name": { - "type": "string", - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.firewalls.insert", - "compute.firewalls.patch" - ] - } - }, - "network": { - "type": "string", - "description": "URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used:\nglobal/networks/default\nIf you choose to specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network \n- projects/myproject/global/networks/my-network \n- global/networks/default" - }, - "priority": { - "type": "integer", - "description": "Priority for this rule. This is an integer between 0 and 65535, both inclusive. When not specified, the value assumed is 1000. Relative priorities determine precedence of conflicting rules. Lower value of priority implies higher precedence (eg, a rule with priority 0 has higher precedence than a rule with priority 1). DENY rules take precedence over ALLOW rules having equal priority.", - "format": "int32" - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "sourceRanges": { - "type": "array", - "description": "If source ranges are specified, the firewall will apply only to traffic that has source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply. Only IPv4 is supported.", - "items": { - "type": "string" - } - }, - "sourceTags": { - "type": "array", - "description": "If source tags are specified, the firewall will apply only to traffic with source IP that belongs to a tag listed in source tags. Source tags cannot be used to control traffic to an instance's external IP address. Because tags are associated with an instance, not an IP address. One or both of sourceRanges and sourceTags may be set. If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply.", - "items": { - "type": "string" - } - }, - "targetTags": { - "type": "array", - "description": "A list of instance tags indicating sets of instances located in the network that may make network connections as specified in allowed[]. If no targetTags are specified, the firewall rule applies to all instances on the specified network.", - "items": { - "type": "string" - } - } - } - }, - "FirewallList": { - "id": "FirewallList", - "type": "object", - "description": "Contains a list of firewalls.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." 
- }, - "items": { - "type": "array", - "description": "[Output Only] A list of Firewall resources.", - "items": { - "$ref": "Firewall" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#firewallList for lists of firewalls.", - "default": "compute#firewallList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "ForwardingRule": { - "id": "ForwardingRule", - "type": "object", - "description": "A ForwardingRule resource. A ForwardingRule resource specifies which pool of target virtual machines to forward a packet to if it matches the given [IPAddress, IPProtocol, ports] tuple.", - "properties": { - "IPAddress": { - "type": "string", - "description": "The IP address that this forwarding rule is serving on behalf of.\n\nFor global forwarding rules, the address must be a global IP. For regional forwarding rules, the address must live in the same region as the forwarding rule. By default, this field is empty and an ephemeral IP from the same scope (global or regional) will be assigned.\n\nWhen the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address belonging to the network/subnetwork configured for the forwarding rule. A reserved address cannot be used. If the field is empty, the IP address will be automatically allocated from the internal IP range of the subnetwork or network configured for this forwarding rule. Only IPv4 is supported." - }, - "IPProtocol": { - "type": "string", - "description": "The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, AH, SCTP or ICMP.\n\nWhen the load balancing scheme is INTERNAL, only TCP and UDP are valid.", - "enum": [ - "AH", - "ESP", - "ICMP", - "SCTP", - "TCP", - "UDP" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "" - ] - }, - "backendService": { - "type": "string", - "description": "This field is not used for external load balancing.\n\nFor internal load balancing, this field identifies the BackendService resource to receive the matched traffic." - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "ipVersion": { - "type": "string", - "description": "The IP Version that will be used by this forwarding rule. Valid options are IPV4 or IPV6. This can only be specified for a global forwarding rule.", - "enum": [ - "IPV4", - "IPV6", - "UNSPECIFIED_VERSION" - ], - "enumDescriptions": [ - "", - "", - "" - ] - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. 
Always compute#forwardingRule for Forwarding Rule resources.", - "default": "compute#forwardingRule" - }, - "loadBalancingScheme": { - "type": "string", - "description": "This signifies what the ForwardingRule will be used for and can only take the following values: INTERNAL, EXTERNAL The value of INTERNAL means that this will be used for Internal Network Load Balancing (TCP, UDP). The value of EXTERNAL means that this will be used for External Load Balancing (HTTP(S) LB, External TCP/UDP LB, SSL Proxy)", - "enum": [ - "EXTERNAL", - "INTERNAL", - "INVALID" - ], - "enumDescriptions": [ - "", - "", - "" - ] - }, - "name": { - "type": "string", - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "network": { - "type": "string", - "description": "This field is not used for external load balancing.\n\nFor internal load balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used." - }, - "portRange": { - "type": "string", - "description": "This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance.\n\nApplicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed to ports in the specified range will be forwarded to target. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges.\n\nSome types of forwarding target have constraints on the acceptable ports: \n- TargetHttpProxy: 80, 8080 \n- TargetHttpsProxy: 443 \n- TargetSslProxy: 443 \n- TargetVpnGateway: 500, 4500\n-" - }, - "ports": { - "type": "array", - "description": "This field is used along with the backend_service field for internal load balancing.\n\nWhen the load balancing scheme is INTERNAL, a single port or a comma separated list of ports can be configured. Only packets addressed to these ports will be forwarded to the backends configured with this forwarding rule.\n\nYou may specify a maximum of up to 5 ports.", - "items": { - "type": "string" - } - }, - "region": { - "type": "string", - "description": "[Output Only] URL of the region where the regional forwarding rule resides. This field is not applicable to global forwarding rules." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "serviceLabel": { - "type": "string", - "description": "An optional prefix to the service name for this Forwarding Rule. If specified, will be the first label of the fully qualified service name.\n\nThe label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.\n\nThis field is only used for internal load balancing.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" 
- }, - "serviceName": { - "type": "string", - "description": "[Output Only] The internal fully qualified service name for this Forwarding Rule.\n\nThis field is only used for internal load balancing." - }, - "subnetwork": { - "type": "string", - "description": "This field is not used for external load balancing.\n\nFor internal load balancing, this field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule.\n\nIf the network specified is in auto subnet mode, this field is optional. However, if the network is in custom subnet mode, a subnetwork must be specified." - }, - "target": { - "type": "string", - "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object.\n\nThis field is not used for internal load balancing." - } - } - }, - "ForwardingRuleAggregatedList": { - "id": "ForwardingRuleAggregatedList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "object", - "description": "A map of scoped forwarding rule lists.", - "additionalProperties": { - "$ref": "ForwardingRulesScopedList", - "description": "Name of the scope containing this set of addresses." - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#forwardingRuleAggregatedList for lists of forwarding rules.", - "default": "compute#forwardingRuleAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "ForwardingRuleList": { - "id": "ForwardingRuleList", - "type": "object", - "description": "Contains a list of ForwardingRule resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource. Set by the server." - }, - "items": { - "type": "array", - "description": "A list of ForwardingRule resources.", - "items": { - "$ref": "ForwardingRule" - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#forwardingRuleList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." 
- } - } - }, - "ForwardingRulesScopedList": { - "id": "ForwardingRulesScopedList", - "type": "object", - "properties": { - "forwardingRules": { - "type": "array", - "description": "List of forwarding rules contained in this scope.", - "items": { - "$ref": "ForwardingRule" - } - }, - "warning": { - "type": "object", - "description": "Informational warning which replaces the list of forwarding rules when the list is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." - } - } - } - } - }, - "GlobalSetLabelsRequest": { - "id": "GlobalSetLabelsRequest", - "type": "object", - "properties": { - "labelFingerprint": { - "type": "string", - "description": "The fingerprint of the previous set of labels for this resource, used to detect conflicts. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash when updating or changing labels. Make a get() request to the resource to get the latest fingerprint.", - "format": "byte" - }, - "labels": { - "type": "object", - "description": "A list of labels to apply for this resource. Each label key & value must comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. For example, \"webserver-frontend\": \"images\". A label value can also be empty (e.g. 
\"my-label\": \"\").", - "additionalProperties": { - "type": "string" - } - } - } - }, - "GuestOsFeature": { - "id": "GuestOsFeature", - "type": "object", - "description": "Guest OS features.", - "properties": { - "type": { - "type": "string", - "description": "The type of supported feature. Currenty only VIRTIO_SCSI_MULTIQUEUE is supported. For newer Windows images, the server might also populate this property with the value WINDOWS to indicate that this is a Windows image. This value is purely informational and does not enable or disable any features.", - "enum": [ - "FEATURE_TYPE_UNSPECIFIED", - "MULTI_IP_SUBNET", - "VIRTIO_SCSI_MULTIQUEUE", - "WINDOWS" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ] - } - } - }, - "HTTPHealthCheck": { - "id": "HTTPHealthCheck", - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The value of the host header in the HTTP health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used." - }, - "port": { - "type": "integer", - "description": "The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.", - "format": "int32" - }, - "portName": { - "type": "string", - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." - }, - "proxyHeader": { - "type": "string", - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "requestPath": { - "type": "string", - "description": "The request path of the HTTP health check request. The default value is /." - } - } - }, - "HTTPSHealthCheck": { - "id": "HTTPSHealthCheck", - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The value of the host header in the HTTPS health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used." - }, - "port": { - "type": "integer", - "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.", - "format": "int32" - }, - "portName": { - "type": "string", - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." - }, - "proxyHeader": { - "type": "string", - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "requestPath": { - "type": "string", - "description": "The request path of the HTTPS health check request. The default value is /." - } - } - }, - "HealthCheck": { - "id": "HealthCheck", - "type": "object", - "description": "An HealthCheck resource. This resource defines a template for how individual virtual machines should be checked for health, via one of the supported protocols.", - "properties": { - "checkIntervalSec": { - "type": "integer", - "description": "How often (in seconds) to send a health check. The default value is 5 seconds.", - "format": "int32" - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in 3339 text format." 
- }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "healthyThreshold": { - "type": "integer", - "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", - "format": "int32" - }, - "httpHealthCheck": { - "$ref": "HTTPHealthCheck" - }, - "httpsHealthCheck": { - "$ref": "HTTPSHealthCheck" - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "Type of the resource.", - "default": "compute#healthCheck" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "sslHealthCheck": { - "$ref": "SSLHealthCheck" - }, - "tcpHealthCheck": { - "$ref": "TCPHealthCheck" - }, - "timeoutSec": { - "type": "integer", - "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have greater value than checkIntervalSec.", - "format": "int32" - }, - "type": { - "type": "string", - "description": "Specifies the type of the healthCheck, either TCP, SSL, HTTP or HTTPS. If not specified, the default is TCP. Exactly one of the protocol-specific health check field must be specified, which must match type field.", - "enum": [ - "HTTP", - "HTTPS", - "INVALID", - "SSL", - "TCP", - "UDP" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "" - ] - }, - "udpHealthCheck": { - "$ref": "UDPHealthCheck" - }, - "unhealthyThreshold": { - "type": "integer", - "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", - "format": "int32" - } - } - }, - "HealthCheckList": { - "id": "HealthCheckList", - "type": "object", - "description": "Contains a list of HealthCheck resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "A list of HealthCheck resources.", - "items": { - "$ref": "HealthCheck" - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#healthCheckList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." 
- } - } - }, - "HealthCheckReference": { - "id": "HealthCheckReference", - "type": "object", - "description": "A full or valid partial URL to a health check. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project-id/global/httpHealthChecks/health-check \n- projects/project-id/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", - "properties": { - "healthCheck": { - "type": "string" - } - } - }, - "HealthStatus": { - "id": "HealthStatus", - "type": "object", - "properties": { - "healthState": { - "type": "string", - "description": "Health state of the instance.", - "enum": [ - "HEALTHY", - "UNHEALTHY" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "instance": { - "type": "string", - "description": "URL of the instance resource." - }, - "ipAddress": { - "type": "string", - "description": "The IP address represented by this resource." - }, - "port": { - "type": "integer", - "description": "The port on the instance.", - "format": "int32" - } - } - }, - "HostRule": { - "id": "HostRule", - "type": "object", - "description": "UrlMaps A host-matching rule for a URL. If matched, will use the named PathMatcher to select the BackendService.", - "properties": { - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "hosts": { - "type": "array", - "description": "The list of host patterns to match. They must be valid hostnames, except * will match any string of ([a-z0-9-.]*). In that case, * must be the first character and must be followed in the pattern by either - or ..", - "items": { - "type": "string" - } - }, - "pathMatcher": { - "type": "string", - "description": "The name of the PathMatcher to use to match the path portion of the URL if the hostRule matches the URL's host portion." - } - } - }, - "HttpHealthCheck": { - "id": "HttpHealthCheck", - "type": "object", - "description": "An HttpHealthCheck resource. This resource defines a template for how individual instances should be checked for health, via HTTP.", - "properties": { - "checkIntervalSec": { - "type": "integer", - "description": "How often (in seconds) to send a health check. The default value is 5 seconds.", - "format": "int32" - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "healthyThreshold": { - "type": "integer", - "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", - "format": "int32" - }, - "host": { - "type": "string", - "description": "The value of the host header in the HTTP health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#httpHealthCheck for HTTP health checks.", - "default": "compute#httpHealthCheck" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. 
The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "port": { - "type": "integer", - "description": "The TCP port number for the HTTP health check request. The default value is 80.", - "format": "int32" - }, - "requestPath": { - "type": "string", - "description": "The request path of the HTTP health check request. The default value is /." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "timeoutSec": { - "type": "integer", - "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have greater value than checkIntervalSec.", - "format": "int32" - }, - "unhealthyThreshold": { - "type": "integer", - "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", - "format": "int32" - } - } - }, - "HttpHealthCheckList": { - "id": "HttpHealthCheckList", - "type": "object", - "description": "Contains a list of HttpHealthCheck resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource. Defined by the server." - }, - "items": { - "type": "array", - "description": "A list of HttpHealthCheck resources.", - "items": { - "$ref": "HttpHealthCheck" - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#httpHealthCheckList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "HttpsHealthCheck": { - "id": "HttpsHealthCheck", - "type": "object", - "description": "An HttpsHealthCheck resource. This resource defines a template for how individual instances should be checked for health, via HTTPS.", - "properties": { - "checkIntervalSec": { - "type": "integer", - "description": "How often (in seconds) to send a health check. The default value is 5 seconds.", - "format": "int32" - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "healthyThreshold": { - "type": "integer", - "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", - "format": "int32" - }, - "host": { - "type": "string", - "description": "The value of the host header in the HTTPS health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used." 
- }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "Type of the resource.", - "default": "compute#httpsHealthCheck" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "port": { - "type": "integer", - "description": "The TCP port number for the HTTPS health check request. The default value is 443.", - "format": "int32" - }, - "requestPath": { - "type": "string", - "description": "The request path of the HTTPS health check request. The default value is \"/\"." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "timeoutSec": { - "type": "integer", - "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have a greater value than checkIntervalSec.", - "format": "int32" - }, - "unhealthyThreshold": { - "type": "integer", - "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", - "format": "int32" - } - } - }, - "HttpsHealthCheckList": { - "id": "HttpsHealthCheckList", - "type": "object", - "description": "Contains a list of HttpsHealthCheck resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "array", - "description": "A list of HttpsHealthCheck resources.", - "items": { - "$ref": "HttpsHealthCheck" - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#httpsHealthCheckList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "Image": { - "id": "Image", - "type": "object", - "description": "An Image resource.", - "properties": { - "archiveSizeBytes": { - "type": "string", - "description": "Size of the image tar.gz archive stored in Google Cloud Storage (in bytes).", - "format": "int64" - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "deprecated": { - "$ref": "DeprecationStatus", - "description": "The deprecation status associated with this image." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." 
- }, - "diskSizeGb": { - "type": "string", - "description": "Size of the image when restored onto a persistent disk (in GB).", - "format": "int64" - }, - "family": { - "type": "string", - "description": "The name of the image family to which this image belongs. You can create disks by specifying an image family instead of a specific image name. The image family always returns its latest image that is not deprecated. The name of the image family must comply with RFC1035." - }, - "guestOsFeatures": { - "type": "array", - "description": "A list of features to enable on the guest OS. Applicable for bootable images only. Currently, only one feature can be enabled, VIRTIO_SCSCI_MULTIQUEUE, which allows each virtual CPU to have its own queue. For Windows images, you can only enable VIRTIO_SCSCI_MULTIQUEUE on images with driver version 1.2.0.1621 or higher. Linux images with kernel versions 3.17 and higher will support VIRTIO_SCSCI_MULTIQUEUE.\n\nFor new Windows images, the server might also populate this field with the value WINDOWS, to indicate that this is a Windows image. This value is purely informational and does not enable or disable any features.", - "items": { - "$ref": "GuestOsFeature" - } - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "imageEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "Encrypts the image using a customer-supplied encryption key.\n\nAfter you encrypt an image with a customer-supplied key, you must provide the same key if you use the image later (e.g. to create a disk from the image).\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the image, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the image later." - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#image for images.", - "default": "compute#image" - }, - "labelFingerprint": { - "type": "string", - "description": "A fingerprint for the labels being applied to this image, which is essentially a hash of the labels used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels.\n\nTo see the latest fingerprint, make a get() request to retrieve an image.", - "format": "byte" - }, - "labels": { - "type": "object", - "description": "Labels to apply to this image. These can be later modified by the setLabels method.", - "additionalProperties": { - "type": "string" - } - }, - "licenses": { - "type": "array", - "description": "Any applicable license URI.", - "items": { - "type": "string" - } - }, - "name": { - "type": "string", - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.images.insert" - ] - } - }, - "rawDisk": { - "type": "object", - "description": "The parameters of the raw disk image.", - "properties": { - "containerType": { - "type": "string", - "description": "The format used to encode and transmit the block device, which should be TAR. This is just a container and transmission format and not a runtime format. Provided by the client when the disk image is created.", - "enum": [ - "TAR" - ], - "enumDescriptions": [ - "" - ] - }, - "sha1Checksum": { - "type": "string", - "description": "An optional SHA1 checksum of the disk image before unpackaging; provided by the client when the disk image is created.", - "pattern": "[a-f0-9]{40}" - }, - "source": { - "type": "string", - "description": "The full Google Cloud Storage URL where the disk image is stored. You must provide either this property or the sourceDisk property but not both.", - "annotations": { - "required": [ - "compute.images.insert" - ] - } - } - } - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "sourceDisk": { - "type": "string", - "description": "URL of the source disk used to create this image. This can be a full or valid partial URL. You must provide either this property or the rawDisk.source property but not both to create an image. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk \n- projects/project/zones/zone/disks/disk \n- zones/zone/disks/disk" - }, - "sourceDiskEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key." - }, - "sourceDiskId": { - "type": "string", - "description": "The ID value of the disk used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given disk name." - }, - "sourceType": { - "type": "string", - "description": "The type of the image used to create this disk. The default and only value is RAW", - "default": "RAW", - "enum": [ - "RAW" - ], - "enumDescriptions": [ - "" - ] - }, - "status": { - "type": "string", - "description": "[Output Only] The status of the image. An image can be used to create other resources, such as instances, only after the image has been successfully created and the status is set to READY. Possible values are FAILED, PENDING, or READY.", - "enum": [ - "FAILED", - "PENDING", - "READY" - ], - "enumDescriptions": [ - "", - "", - "" - ] - } - } - }, - "ImageList": { - "id": "ImageList", - "type": "object", - "description": "Contains a list of images.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." 
- }, - "items": { - "type": "array", - "description": "[Output Only] A list of Image resources.", - "items": { - "$ref": "Image" - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#imageList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "Instance": { - "id": "Instance", - "type": "object", - "description": "An Instance resource.", - "properties": { - "canIpForward": { - "type": "boolean", - "description": "Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes. For more information, see Enabling IP Forwarding." - }, - "cpuPlatform": { - "type": "string", - "description": "[Output Only] The CPU platform used by this instance." - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "disks": { - "type": "array", - "description": "Array of disks associated with this instance. Persistent disks must be created before you can assign them.", - "items": { - "$ref": "AttachedDisk" - } - }, - "guestAccelerators": { - "type": "array", - "description": "List of the type and count of accelerator cards attached to the instance.", - "items": { - "$ref": "AcceleratorConfig" - } - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#instance for instances.", - "default": "compute#instance" - }, - "labelFingerprint": { - "type": "string", - "description": "A fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata.\n\nTo see the latest fingerprint, make get() request to the instance.", - "format": "byte" - }, - "labels": { - "type": "object", - "description": "Labels to apply to this instance. These can be later modified by the setLabels method.", - "additionalProperties": { - "type": "string" - } - }, - "machineType": { - "type": "string", - "description": "Full or partial URL of the machine type resource to use for this instance, in the format: zones/zone/machineTypes/machine-type. This is provided by the client when the instance is created. For example, the following is a valid partial url to a predefined machine type:\n\nzones/us-central1-f/machineTypes/n1-standard-1 \n\nTo create a custom machine type, provide a URL to a machine type in the following format, where CPUS is 1 or an even number up to 32 (2, 4, 6, ... 
24, etc), and MEMORY is the total memory for this instance. Memory must be a multiple of 256 MB and must be supplied in MB (e.g. 5 GB of memory is 5120 MB):\n\nzones/zone/machineTypes/custom-CPUS-MEMORY \n\nFor example: zones/us-central1-f/machineTypes/custom-4-5120 \n\nFor a full list of restrictions, read the Specifications for custom machine types.", - "annotations": { - "required": [ - "compute.instances.insert" - ] - } - }, - "metadata": { - "$ref": "Metadata", - "description": "The metadata key/value pairs assigned to this instance. This includes custom metadata and predefined keys." - }, - "name": { - "type": "string", - "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.instances.insert" - ] - } - }, - "networkInterfaces": { - "type": "array", - "description": "An array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet. Only one interface is supported per instance.", - "items": { - "$ref": "NetworkInterface" - } - }, - "scheduling": { - "$ref": "Scheduling", - "description": "Sets the scheduling options for this instance." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - }, - "serviceAccounts": { - "type": "array", - "description": "A list of service accounts, with their specified scopes, authorized for this instance. Only one service account per VM instance is supported.\n\nService accounts generate access tokens that can be accessed through the metadata server and used to authenticate applications on the instance. See Service Accounts for more information.", - "items": { - "$ref": "ServiceAccount" - } - }, - "status": { - "type": "string", - "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, and TERMINATED.", - "enum": [ - "PROVISIONING", - "RUNNING", - "STAGING", - "STOPPED", - "STOPPING", - "SUSPENDED", - "SUSPENDING", - "TERMINATED" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "statusMessage": { - "type": "string", - "description": "[Output Only] An optional, human-readable explanation of the status." - }, - "tags": { - "$ref": "Tags", - "description": "A list of tags to apply to this instance. Tags are used to identify valid sources or targets for network firewalls and are specified by the client during instance creation. The tags can be later modified by the setTags method. Each tag within the list must comply with RFC1035." - }, - "zone": { - "type": "string", - "description": "[Output Only] URL of the zone where the instance resides." - } - } - }, - "InstanceAggregatedList": { - "id": "InstanceAggregatedList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." 
- }, - "items": { - "type": "object", - "description": "[Output Only] A map of scoped instance lists.", - "additionalProperties": { - "$ref": "InstancesScopedList", - "description": "[Output Only] Name of the scope containing this set of instances." - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#instanceAggregatedList for aggregated lists of Instance resources.", - "default": "compute#instanceAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "InstanceGroup": { - "id": "InstanceGroup", - "type": "object", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] The creation timestamp for this instance group in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "fingerprint": { - "type": "string", - "description": "[Output Only] The fingerprint of the named ports. The system uses this fingerprint to detect conflicts when multiple users change the named ports concurrently.", - "format": "byte" - }, - "id": { - "type": "string", - "description": "[Output Only] A unique identifier for this instance group, generated by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] The resource type, which is always compute#instanceGroup for instance groups.", - "default": "compute#instanceGroup" - }, - "name": { - "type": "string", - "description": "The name of the instance group. The name must be 1-63 characters long, and comply with RFC1035.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.instanceGroupManagers.insert" - ] - } - }, - "namedPorts": { - "type": "array", - "description": "Assigns a name to a port number. For example: {name: \"http\", port: 80}\n\nThis allows the system to reference ports by the assigned name instead of a port number. Named ports can also contain multiple ports. For example: [{name: \"http\", port: 80},{name: \"http\", port: 8080}] \n\nNamed ports apply to all instances in this instance group.", - "items": { - "$ref": "NamedPort" - } - }, - "network": { - "type": "string", - "description": "The URL of the network to which all instances in the instance group belong." - }, - "region": { - "type": "string", - "description": "The URL of the region where the instance group is located (for regional resources)." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] The URL for this instance group. The server generates this URL." - }, - "size": { - "type": "integer", - "description": "[Output Only] The total number of instances in the instance group.", - "format": "int32" - }, - "subnetwork": { - "type": "string", - "description": "The URL of the subnetwork to which all instances in the instance group belong." - }, - "zone": { - "type": "string", - "description": "[Output Only] The URL of the zone where the instance group is located (for zonal resources)." 
- } - } - }, - "InstanceGroupAggregatedList": { - "id": "InstanceGroupAggregatedList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] A unique identifier for this aggregated list of instance groups. The server generates this identifier." - }, - "items": { - "type": "object", - "description": "A map of scoped instance group lists.", - "additionalProperties": { - "$ref": "InstanceGroupsScopedList", - "description": "The name of the scope that contains this set of instance groups." - } - }, - "kind": { - "type": "string", - "description": "[Output Only] The resource type, which is always compute#instanceGroupAggregatedList for aggregated lists of instance groups.", - "default": "compute#instanceGroupAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] The URL for this resource type. The server generates this URL." - } - } - }, - "InstanceGroupList": { - "id": "InstanceGroupList", - "type": "object", - "description": "A list of InstanceGroup resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] A unique identifier for this list of instance groups. The server generates this identifier." - }, - "items": { - "type": "array", - "description": "A list of instance groups.", - "items": { - "$ref": "InstanceGroup" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] The resource type, which is always compute#instanceGroupList for instance group lists.", - "default": "compute#instanceGroupList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] The URL for this resource type. The server generates this URL." - } - } - }, - "InstanceGroupManager": { - "id": "InstanceGroupManager", - "type": "object", - "description": "An Instance Group Manager resource.", - "properties": { - "autoHealingPolicies": { - "type": "array", - "description": "The autohealing policy for this managed instance group. You can specify only one value.", - "items": { - "$ref": "InstanceGroupManagerAutoHealingPolicy" - } - }, - "baseInstanceName": { - "type": "string", - "description": "The base instance name to use for instances in this group. The value must be 1-58 characters long. Instances are named by appending a hyphen and a random four-character string to the base instance name. The base instance name must comply with RFC1035.", - "pattern": "[a-z][-a-z0-9]{0,57}", - "annotations": { - "required": [ - "compute.instanceGroupManagers.insert" - ] - } - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] The creation timestamp for this managed instance group in RFC3339 text format." 
- }, - "currentActions": { - "$ref": "InstanceGroupManagerActionsSummary", - "description": "[Output Only] The list of instance actions and the number of instances in this managed instance group that are scheduled for each of those actions." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "failoverAction": { - "type": "string", - "description": "The action to perform in case of zone failure. Only one value is supported, NO_FAILOVER. The default is NO_FAILOVER.", - "enum": [ - "NO_FAILOVER", - "UNKNOWN" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "fingerprint": { - "type": "string", - "description": "[Output Only] The fingerprint of the resource data. You can use this optional field for optimistic locking when you update the resource.", - "format": "byte" - }, - "id": { - "type": "string", - "description": "[Output Only] A unique identifier for this resource type. The server generates this identifier.", - "format": "uint64" - }, - "instanceGroup": { - "type": "string", - "description": "[Output Only] The URL of the Instance Group resource." - }, - "instanceTemplate": { - "type": "string", - "description": "The URL of the instance template that is specified for this managed instance group. The group uses this template to create all new instances in the managed instance group." - }, - "kind": { - "type": "string", - "description": "[Output Only] The resource type, which is always compute#instanceGroupManager for managed instance groups.", - "default": "compute#instanceGroupManager" - }, - "name": { - "type": "string", - "description": "The name of the managed instance group. The name must be 1-63 characters long, and comply with RFC1035.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.instanceGroupManagers.insert" - ] - } - }, - "namedPorts": { - "type": "array", - "description": "Named ports configured for the Instance Groups complementary to this Instance Group Manager.", - "items": { - "$ref": "NamedPort" - } - }, - "region": { - "type": "string", - "description": "[Output Only] The URL of the region where the managed instance group resides (for regional resources)." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] The URL for this managed instance group. The server defines this URL." - }, - "serviceAccount": { - "type": "string", - "description": "Service account will be used as credentials for all operations performed by managed instance group on instances. The service accounts needs all permissions required to create and delete instances. When not specified, the service account {projectNumber}@cloudservices.gserviceaccount.com will be used." - }, - "targetPools": { - "type": "array", - "description": "The URLs for all TargetPool resources to which instances in the instanceGroup field are added. The target pools automatically apply to all of the instances in the managed instance group.", - "items": { - "type": "string" - } - }, - "targetSize": { - "type": "integer", - "description": "The target number of running instances for this managed instance group. Deleting or abandoning instances reduces this number. 
Resizing the group changes this number.", - "format": "int32", - "annotations": { - "required": [ - "compute.instanceGroupManagers.insert" - ] - } - }, - "zone": { - "type": "string", - "description": "[Output Only] The URL of the zone where the managed instance group is located (for zonal resources)." - } - } - }, - "InstanceGroupManagerActionsSummary": { - "id": "InstanceGroupManagerActionsSummary", - "type": "object", - "properties": { - "abandoning": { - "type": "integer", - "description": "[Output Only] The total number of instances in the managed instance group that are scheduled to be abandoned. Abandoning an instance removes it from the managed instance group without deleting it.", - "format": "int32" - }, - "creating": { - "type": "integer", - "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be created or are currently being created. If the group fails to create any of these instances, it tries again until it creates the instance successfully.\n\nIf you have disabled creation retries, this field will not be populated; instead, the creatingWithoutRetries field will be populated.", - "format": "int32" - }, - "creatingWithoutRetries": { - "type": "integer", - "description": "[Output Only] The number of instances that the managed instance group will attempt to create. The group attempts to create each instance only once. If the group fails to create any of these instances, it decreases the group's targetSize value accordingly.", - "format": "int32" - }, - "deleting": { - "type": "integer", - "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be deleted or are currently being deleted.", - "format": "int32" - }, - "none": { - "type": "integer", - "description": "[Output Only] The number of instances in the managed instance group that are running and have no scheduled actions.", - "format": "int32" - }, - "recreating": { - "type": "integer", - "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be recreated or are currently being being recreated. Recreating an instance deletes the existing root persistent disk and creates a new disk from the image that is defined in the instance template.", - "format": "int32" - }, - "refreshing": { - "type": "integer", - "description": "[Output Only] The number of instances in the managed instance group that are being reconfigured with properties that do not require a restart or a recreate action. For example, setting or removing target pools for the instance.", - "format": "int32" - }, - "restarting": { - "type": "integer", - "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be restarted or are currently being restarted.", - "format": "int32" - } - } - }, - "InstanceGroupManagerAggregatedList": { - "id": "InstanceGroupManagerAggregatedList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] A unique identifier for this aggregated list of managed instance groups. The server generates this identifier." - }, - "items": { - "type": "object", - "description": "[Output Only] A map of filtered managed instance group lists.", - "additionalProperties": { - "$ref": "InstanceGroupManagersScopedList", - "description": "[Output Only] The name of the scope that contains this set of managed instance groups." 
- } - }, - "kind": { - "type": "string", - "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerAggregatedList for an aggregated list of managed instance groups.", - "default": "compute#instanceGroupManagerAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] The URL for this resource type. The server generates this URL." - } - } - }, - "InstanceGroupManagerAutoHealingPolicy": { - "id": "InstanceGroupManagerAutoHealingPolicy", - "type": "object", - "properties": { - "healthCheck": { - "type": "string", - "description": "The URL for the health check that signals autohealing." - }, - "initialDelaySec": { - "type": "integer", - "description": "The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. This value must be from range [0, 3600].", - "format": "int32" - } - } - }, - "InstanceGroupManagerList": { - "id": "InstanceGroupManagerList", - "type": "object", - "description": "[Output Only] A list of managed instance groups.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] A unique identifier for this resource type. The server generates this identifier." - }, - "items": { - "type": "array", - "description": "[Output Only] A list of managed instance groups.", - "items": { - "$ref": "InstanceGroupManager" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerList for a list of managed instance groups.", - "default": "compute#instanceGroupManagerList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "InstanceGroupManagersAbandonInstancesRequest": { - "id": "InstanceGroupManagersAbandonInstancesRequest", - "type": "object", - "properties": { - "instances": { - "type": "array", - "description": "The URLs of one or more instances to abandon. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", - "items": { - "type": "string" - } - } - } - }, - "InstanceGroupManagersDeleteInstancesRequest": { - "id": "InstanceGroupManagersDeleteInstancesRequest", - "type": "object", - "properties": { - "instances": { - "type": "array", - "description": "The URLs of one or more instances to delete. 
This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", - "items": { - "type": "string" - } - } - } - }, - "InstanceGroupManagersListManagedInstancesResponse": { - "id": "InstanceGroupManagersListManagedInstancesResponse", - "type": "object", - "properties": { - "managedInstances": { - "type": "array", - "description": "[Output Only] The list of instances in the managed instance group.", - "items": { - "$ref": "ManagedInstance" - } - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - } - } - }, - "InstanceGroupManagersRecreateInstancesRequest": { - "id": "InstanceGroupManagersRecreateInstancesRequest", - "type": "object", - "properties": { - "instances": { - "type": "array", - "description": "The URLs of one or more instances to recreate. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", - "items": { - "type": "string" - } - } - } - }, - "InstanceGroupManagersResizeAdvancedRequest": { - "id": "InstanceGroupManagersResizeAdvancedRequest", - "type": "object", - "properties": { - "noCreationRetries": { - "type": "boolean", - "description": "If this flag is true, the managed instance group attempts to create all instances initiated by this resize request only once. If there is an error during creation, the managed instance group does not retry create this instance, and we will decrease the targetSize of the request instead. If the flag is false, the group attemps to recreate each instance continuously until it succeeds.\n\nThis flag matters only in the first attempt of creation of an instance. After an instance is successfully created while this flag is enabled, the instance behaves the same way as all the other instances created with a regular resize request. In particular, if a running instance dies unexpectedly at a later time and needs to be recreated, this mode does not affect the recreation behavior in that scenario.\n\nThis flag is applicable only to the current resize request. It does not influence other resize requests in any way.\n\nYou can see which instances is being creating in which mode by calling the get or listManagedInstances API." - }, - "targetSize": { - "type": "integer", - "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", - "format": "int32" - } - } - }, - "InstanceGroupManagersScopedList": { - "id": "InstanceGroupManagersScopedList", - "type": "object", - "properties": { - "instanceGroupManagers": { - "type": "array", - "description": "[Output Only] The list of managed instance groups that are contained in the specified project and zone.", - "items": { - "$ref": "InstanceGroupManager" - } - }, - "warning": { - "type": "object", - "description": "[Output Only] The warning that replaces the list of managed instance groups when the list is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." - } - } - } - } - }, - "InstanceGroupManagersSetAutoHealingRequest": { - "id": "InstanceGroupManagersSetAutoHealingRequest", - "type": "object", - "properties": { - "autoHealingPolicies": { - "type": "array", - "items": { - "$ref": "InstanceGroupManagerAutoHealingPolicy" - } - } - } - }, - "InstanceGroupManagersSetInstanceTemplateRequest": { - "id": "InstanceGroupManagersSetInstanceTemplateRequest", - "type": "object", - "properties": { - "instanceTemplate": { - "type": "string", - "description": "The URL of the instance template that is specified for this managed instance group. The group uses this template to create all new instances in the managed instance group." - } - } - }, - "InstanceGroupManagersSetTargetPoolsRequest": { - "id": "InstanceGroupManagersSetTargetPoolsRequest", - "type": "object", - "properties": { - "fingerprint": { - "type": "string", - "description": "The fingerprint of the target pools information. Use this optional property to prevent conflicts when multiple users change the target pools settings concurrently. Obtain the fingerprint with the instanceGroupManagers.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request.", - "format": "byte" - }, - "targetPools": { - "type": "array", - "description": "The list of target pool URLs that instances in this managed instance group belong to. The managed instance group applies these target pools to all of the instances in the group. 
Existing instances and new instances in the group all receive these target pool settings.", - "items": { - "type": "string" - } - } - } - }, - "InstanceGroupsAddInstancesRequest": { - "id": "InstanceGroupsAddInstancesRequest", - "type": "object", - "properties": { - "instances": { - "type": "array", - "description": "The list of instances to add to the instance group.", - "items": { - "$ref": "InstanceReference" - } - } - } - }, - "InstanceGroupsListInstances": { - "id": "InstanceGroupsListInstances", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] A unique identifier for this list of instances in the specified instance group. The server generates this identifier." - }, - "items": { - "type": "array", - "description": "[Output Only] A list of instances and any named ports that are assigned to those instances.", - "items": { - "$ref": "InstanceWithNamedPorts" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] The resource type, which is always compute#instanceGroupsListInstances for the list of instances in the specified instance group.", - "default": "compute#instanceGroupsListInstances" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] The URL for this list of instances in the specified instance groups. The server generates this URL." - } - } - }, - "InstanceGroupsListInstancesRequest": { - "id": "InstanceGroupsListInstancesRequest", - "type": "object", - "properties": { - "instanceState": { - "type": "string", - "description": "A filter for the state of the instances in the instance group. Valid options are ALL or RUNNING. If you do not specify this parameter the list includes all instances regardless of their state.", - "enum": [ - "ALL", - "RUNNING" - ], - "enumDescriptions": [ - "", - "" - ] - } - } - }, - "InstanceGroupsRemoveInstancesRequest": { - "id": "InstanceGroupsRemoveInstancesRequest", - "type": "object", - "properties": { - "instances": { - "type": "array", - "description": "The list of instances to remove from the instance group.", - "items": { - "$ref": "InstanceReference" - } - } - } - }, - "InstanceGroupsScopedList": { - "id": "InstanceGroupsScopedList", - "type": "object", - "properties": { - "instanceGroups": { - "type": "array", - "description": "[Output Only] The list of instance groups that are contained in this scope.", - "items": { - "$ref": "InstanceGroup" - } - }, - "warning": { - "type": "object", - "description": "[Output Only] An informational warning that replaces the list of instance groups when the list is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." - } - } - } - } - }, - "InstanceGroupsSetNamedPortsRequest": { - "id": "InstanceGroupsSetNamedPortsRequest", - "type": "object", - "properties": { - "fingerprint": { - "type": "string", - "description": "The fingerprint of the named ports information for this instance group. Use this optional property to prevent conflicts when multiple users change the named ports settings concurrently. Obtain the fingerprint with the instanceGroups.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request.", - "format": "byte" - }, - "namedPorts": { - "type": "array", - "description": "The list of named ports to set for this instance group.", - "items": { - "$ref": "NamedPort" - } - } - } - }, - "InstanceList": { - "id": "InstanceList", - "type": "object", - "description": "Contains a list of instances.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "[Output Only] A list of instances.", - "items": { - "$ref": "Instance" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#instanceList for lists of Instance resources.", - "default": "compute#instanceList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "InstanceListReferrers": { - "id": "InstanceListReferrers", - "type": "object", - "description": "Contains a list of instance referrers.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "[Output Only] A list of referrers.", - "items": { - "$ref": "Reference" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#instanceListReferrers for lists of Instance referrers.", - "default": "compute#instanceListReferrers" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "InstanceMoveRequest": { - "id": "InstanceMoveRequest", - "type": "object", - "properties": { - "destinationZone": { - "type": "string", - "description": "The URL of the destination zone to move the instance. This can be a full or partial URL. For example, the following are all valid URLs to a zone: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone \n- projects/project/zones/zone \n- zones/zone" - }, - "targetInstance": { - "type": "string", - "description": "The URL of the target instance to move. This can be a full or partial URL. For example, the following are all valid URLs to an instance: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance \n- projects/project/zones/zone/instances/instance \n- zones/zone/instances/instance" - } - } - }, - "InstanceProperties": { - "id": "InstanceProperties", - "type": "object", - "description": "", - "properties": { - "canIpForward": { - "type": "boolean", - "description": "Enables instances created based on this template to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding for instances documentation for more information." - }, - "description": { - "type": "string", - "description": "An optional text description for the instances that are created from this instance template." 
- }, - "disks": { - "type": "array", - "description": "An array of disks that are associated with the instances that are created from this template.", - "items": { - "$ref": "AttachedDisk" - } - }, - "guestAccelerators": { - "type": "array", - "description": "A list of guest accelerator cards' type and count to use for instances created from the instance template.", - "items": { - "$ref": "AcceleratorConfig" - } - }, - "labels": { - "type": "object", - "description": "Labels to apply to instances that are created from this template.", - "additionalProperties": { - "type": "string" - } - }, - "machineType": { - "type": "string", - "description": "The machine type to use for instances that are created from this template.", - "annotations": { - "required": [ - "compute.instanceTemplates.insert" - ] - } - }, - "metadata": { - "$ref": "Metadata", - "description": "The metadata key/value pairs to assign to instances that are created from this template. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information." - }, - "networkInterfaces": { - "type": "array", - "description": "An array of network access configurations for this interface.", - "items": { - "$ref": "NetworkInterface" - } - }, - "scheduling": { - "$ref": "Scheduling", - "description": "Specifies the scheduling options for the instances that are created from this template." - }, - "serviceAccounts": { - "type": "array", - "description": "A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from this template. Use metadata queries to obtain the access tokens for these instances.", - "items": { - "$ref": "ServiceAccount" - } - }, - "tags": { - "$ref": "Tags", - "description": "A list of tags to apply to the instances that are created from this template. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035." - } - } - }, - "InstanceReference": { - "id": "InstanceReference", - "type": "object", - "properties": { - "instance": { - "type": "string", - "description": "The URL for a specific instance." - } - } - }, - "InstanceTemplate": { - "id": "InstanceTemplate", - "type": "object", - "description": "An Instance Template resource.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] The creation timestamp for this instance template in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] A unique identifier for this instance template. The server defines this identifier.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] The resource type, which is always compute#instanceTemplate for instance templates.", - "default": "compute#instanceTemplate" - }, - "name": { - "type": "string", - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.instanceTemplates.insert" - ] - } - }, - "properties": { - "$ref": "InstanceProperties", - "description": "The instance properties for this instance template." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] The URL for this instance template. The server defines this URL." - } - } - }, - "InstanceTemplateList": { - "id": "InstanceTemplateList", - "type": "object", - "description": "A list of instance templates.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] A unique identifier for this instance template. The server defines this identifier." - }, - "items": { - "type": "array", - "description": "[Output Only] list of InstanceTemplate resources.", - "items": { - "$ref": "InstanceTemplate" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] The resource type, which is always compute#instanceTemplatesListResponse for instance template lists.", - "default": "compute#instanceTemplateList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] The URL for this instance template list. The server defines this URL." - } - } - }, - "InstanceWithNamedPorts": { - "id": "InstanceWithNamedPorts", - "type": "object", - "properties": { - "instance": { - "type": "string", - "description": "[Output Only] The URL of the instance." - }, - "namedPorts": { - "type": "array", - "description": "[Output Only] The named ports that belong to this instance group.", - "items": { - "$ref": "NamedPort" - } - }, - "status": { - "type": "string", - "description": "[Output Only] The status of the instance.", - "enum": [ - "PROVISIONING", - "RUNNING", - "STAGING", - "STOPPED", - "STOPPING", - "SUSPENDED", - "SUSPENDING", - "TERMINATED" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "" - ] - } - } - }, - "InstancesScopedList": { - "id": "InstancesScopedList", - "type": "object", - "properties": { - "instances": { - "type": "array", - "description": "[Output Only] List of instances contained in this scope.", - "items": { - "$ref": "Instance" - } - }, - "warning": { - "type": "object", - "description": "[Output Only] Informational warning which replaces the list of instances when the list is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." - } - } - } - } - }, - "InstancesSetLabelsRequest": { - "id": "InstancesSetLabelsRequest", - "type": "object", - "properties": { - "labelFingerprint": { - "type": "string", - "description": "Fingerprint of the previous set of labels for this resource, used to prevent conflicts. Provide the latest fingerprint value when making a request to add or change labels.", - "format": "byte" - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - } - } - } - }, - "InstancesSetMachineResourcesRequest": { - "id": "InstancesSetMachineResourcesRequest", - "type": "object", - "properties": { - "guestAccelerators": { - "type": "array", - "description": "List of the type and count of accelerator cards attached to the instance.", - "items": { - "$ref": "AcceleratorConfig" - } - } - } - }, - "InstancesSetMachineTypeRequest": { - "id": "InstancesSetMachineTypeRequest", - "type": "object", - "properties": { - "machineType": { - "type": "string", - "description": "Full or partial URL of the machine type resource. See Machine Types for a full list of machine types. For example: zones/us-central1-f/machineTypes/n1-standard-1" - } - } - }, - "InstancesSetServiceAccountRequest": { - "id": "InstancesSetServiceAccountRequest", - "type": "object", - "properties": { - "email": { - "type": "string", - "description": "Email address of the service account." 
- }, - "scopes": { - "type": "array", - "description": "The list of scopes to be made available for this service account.", - "items": { - "type": "string" - } - } - } - }, - "InstancesStartWithEncryptionKeyRequest": { - "id": "InstancesStartWithEncryptionKeyRequest", - "type": "object", - "properties": { - "disks": { - "type": "array", - "description": "Array of disks associated with this instance that are protected with a customer-supplied encryption key.\n\nIn order to start the instance, the disk url and its corresponding key must be provided.\n\nIf the disk is not protected with a customer-supplied encryption key it should not be specified.", - "items": { - "$ref": "CustomerEncryptionKeyProtectedDisk" - } - } - } - }, - "License": { - "id": "License", - "type": "object", - "description": "A license resource.", - "properties": { - "chargesUseFee": { - "type": "boolean", - "description": "[Output Only] If true, the customer will be charged license fee for running software that contains this license on an instance." - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#license for licenses.", - "default": "compute#license" - }, - "name": { - "type": "string", - "description": "[Output Only] Name of the resource. The name is 1-63 characters long and complies with RFC1035.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.images.insert" - ] - } - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - } - } - }, - "LogConfig": { - "id": "LogConfig", - "type": "object", - "description": "Specifies what kind of log the caller must write", - "properties": { - "counter": { - "$ref": "LogConfigCounterOptions", - "description": "Counter options." - } - } - }, - "LogConfigCounterOptions": { - "id": "LogConfigCounterOptions", - "type": "object", - "description": "Options for counters", - "properties": { - "field": { - "type": "string", - "description": "The field value to attribute." - }, - "metric": { - "type": "string", - "description": "The metric to update." - } - } - }, - "MachineType": { - "id": "MachineType", - "type": "object", - "description": "A Machine Type resource.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "deprecated": { - "$ref": "DeprecationStatus", - "description": "[Output Only] The deprecation status associated with this machine type." - }, - "description": { - "type": "string", - "description": "[Output Only] An optional textual description of the resource." - }, - "guestCpus": { - "type": "integer", - "description": "[Output Only] The number of virtual CPUs that are available to the instance.", - "format": "int32" - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "isSharedCpu": { - "type": "boolean", - "description": "[Output Only] Whether this machine type has a shared CPU. See Shared-core machine types for more information." - }, - "kind": { - "type": "string", - "description": "[Output Only] The type of the resource. 
Always compute#machineType for machine types.", - "default": "compute#machineType" - }, - "maximumPersistentDisks": { - "type": "integer", - "description": "[Output Only] Maximum persistent disks allowed.", - "format": "int32" - }, - "maximumPersistentDisksSizeGb": { - "type": "string", - "description": "[Output Only] Maximum total persistent disks size (GB) allowed.", - "format": "int64" - }, - "memoryMb": { - "type": "integer", - "description": "[Output Only] The amount of physical memory available to the instance, defined in MB.", - "format": "int32" - }, - "name": { - "type": "string", - "description": "[Output Only] Name of the resource.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "zone": { - "type": "string", - "description": "[Output Only] The name of the zone where the machine type resides, such as us-central1-a." - } - } - }, - "MachineTypeAggregatedList": { - "id": "MachineTypeAggregatedList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "object", - "description": "[Output Only] A map of scoped machine type lists.", - "additionalProperties": { - "$ref": "MachineTypesScopedList", - "description": "[Output Only] Name of the scope containing this set of machine types." - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#machineTypeAggregatedList for aggregated lists of machine types.", - "default": "compute#machineTypeAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "MachineTypeList": { - "id": "MachineTypeList", - "type": "object", - "description": "Contains a list of machine types.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "[Output Only] A list of Machine Type resources.", - "items": { - "$ref": "MachineType" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#machineTypeList for lists of machine types.", - "default": "compute#machineTypeList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." 
- } - } - }, - "MachineTypesScopedList": { - "id": "MachineTypesScopedList", - "type": "object", - "properties": { - "machineTypes": { - "type": "array", - "description": "[Output Only] List of machine types contained in this scope.", - "items": { - "$ref": "MachineType" - } - }, - "warning": { - "type": "object", - "description": "[Output Only] An informational warning that appears when the machine types list is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." - } - } - } - } - }, - "ManagedInstance": { - "id": "ManagedInstance", - "type": "object", - "properties": { - "currentAction": { - "type": "string", - "description": "[Output Only] The current action that the managed instance group has scheduled for the instance. Possible values: \n- NONE The instance is running, and the managed instance group does not have any scheduled actions for this instance. \n- CREATING The managed instance group is creating this instance. If the group fails to create this instance, it will try again until it is successful. \n- CREATING_WITHOUT_RETRIES The managed instance group is attempting to create this instance only once. If the group fails to create this instance, it does not try again and the group's targetSize value is decreased instead. \n- RECREATING The managed instance group is recreating this instance. \n- DELETING The managed instance group is permanently deleting this instance. \n- ABANDONING The managed instance group is abandoning this instance. The instance will be removed from the instance group and from any target pools that are associated with this group. \n- RESTARTING The managed instance group is restarting the instance. 
\n- REFRESHING The managed instance group is applying configuration changes to the instance without stopping it. For example, the group can update the target pool list for an instance without stopping that instance.", - "enum": [ - "ABANDONING", - "CREATING", - "CREATING_WITHOUT_RETRIES", - "DELETING", - "NONE", - "RECREATING", - "REFRESHING", - "RESTARTING" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "id": { - "type": "string", - "description": "[Output only] The unique identifier for this resource. This field is empty when instance does not exist.", - "format": "uint64" - }, - "instance": { - "type": "string", - "description": "[Output Only] The URL of the instance. The URL can exist even if the instance has not yet been created." - }, - "instanceStatus": { - "type": "string", - "description": "[Output Only] The status of the instance. This field is empty when the instance does not exist.", - "enum": [ - "PROVISIONING", - "RUNNING", - "STAGING", - "STOPPED", - "STOPPING", - "SUSPENDED", - "SUSPENDING", - "TERMINATED" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "lastAttempt": { - "$ref": "ManagedInstanceLastAttempt", - "description": "[Output Only] Information about the last attempt to create or delete the instance." - }, - "version": { - "$ref": "ManagedInstanceVersion", - "description": "[Output Only] Intended version of this instance." - } - } - }, - "ManagedInstanceLastAttempt": { - "id": "ManagedInstanceLastAttempt", - "type": "object", - "properties": { - "errors": { - "type": "object", - "description": "[Output Only] Encountered errors during the last attempt to create or delete the instance.", - "properties": { - "errors": { - "type": "array", - "description": "[Output Only] The array of errors encountered while processing this operation.", - "items": { - "type": "object", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] The error type identifier for this error." - }, - "location": { - "type": "string", - "description": "[Output Only] Indicates the field in the request that caused the error. This property is optional." - }, - "message": { - "type": "string", - "description": "[Output Only] An optional, human-readable error message." - } - } - } - } - } - } - } - }, - "ManagedInstanceVersion": { - "id": "ManagedInstanceVersion", - "type": "object", - "properties": { - "instanceTemplate": { - "type": "string", - "description": "[Output Only] The intended template of the instance. This field is empty when current_action is one of { DELETING, ABANDONING }." - }, - "name": { - "type": "string", - "description": "[Output Only] Name of the version." - } - } - }, - "Metadata": { - "id": "Metadata", - "type": "object", - "description": "A metadata key/value entry.", - "properties": { - "fingerprint": { - "type": "string", - "description": "Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata.", - "format": "byte" - }, - "items": { - "type": "array", - "description": "Array of key/value pairs. The total size of all keys and values must be less than 512 KB.", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "Key for the metadata entry. 
Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project.", - "pattern": "[a-zA-Z0-9-_]{1,128}", - "annotations": { - "required": [ - "compute.instances.insert", - "compute.projects.setCommonInstanceMetadata" - ] - } - }, - "value": { - "type": "string", - "description": "Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 32768 bytes.", - "annotations": { - "required": [ - "compute.instances.insert", - "compute.projects.setCommonInstanceMetadata" - ] - } - } - } - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#metadata for metadata.", - "default": "compute#metadata" - } - } - }, - "NamedPort": { - "id": "NamedPort", - "type": "object", - "description": "The named port. For example: .", - "properties": { - "name": { - "type": "string", - "description": "The name for this named port. The name must be 1-63 characters long, and comply with RFC1035." - }, - "port": { - "type": "integer", - "description": "The port number, which can be a value between 1 and 65535.", - "format": "int32" - } - } - }, - "Network": { - "id": "Network", - "type": "object", - "description": "Represents a Network resource. Read Networks and Firewalls for more information.", - "properties": { - "IPv4Range": { - "type": "string", - "description": "The range of internal addresses that are legal on this network. This range is a CIDR specification, for example: 192.168.0.0/16. Provided by the client when the network is created.", - "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}/[0-9]{1,2}" - }, - "autoCreateSubnetworks": { - "type": "boolean", - "description": "When set to true, the network is created in \"auto subnet mode\". When set to false, the network is in \"custom subnet mode\".\n\nIn \"auto subnet mode\", a newly created network is assigned the default CIDR of 10.128.0.0/9 and it automatically creates one subnetwork per region." - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "gatewayIPv4": { - "type": "string", - "description": "A gateway address for default routing to other networks. This value is read only and is selected by the Google Compute Engine, typically as the first usable address in the IPv4Range.", - "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}" - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#network for networks.", - "default": "compute#network" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.networks.insert" - ] - } - }, - "peerings": { - "type": "array", - "description": "[Output Only] List of network peerings for the resource.", - "items": { - "$ref": "NetworkPeering" - } - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "subnetworks": { - "type": "array", - "description": "[Output Only] Server-defined fully-qualified URLs for all subnetworks in this network.", - "items": { - "type": "string" - } - } - } - }, - "NetworkInterface": { - "id": "NetworkInterface", - "type": "object", - "description": "A network interface resource attached to an instance.", - "properties": { - "accessConfigs": { - "type": "array", - "description": "An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access.", - "items": { - "$ref": "AccessConfig" - } - }, - "aliasIpRanges": { - "type": "array", - "description": "An array of alias IP ranges for this network interface. Can only be specified for network interfaces on subnet-mode networks.", - "items": { - "$ref": "AliasIpRange" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#networkInterface for network interfaces.", - "default": "compute#networkInterface" - }, - "name": { - "type": "string", - "description": "[Output Only] The name of the network interface, generated by the server. For network devices, these are eth0, eth1, etc." - }, - "network": { - "type": "string", - "description": "URL of the network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used; if the network is not specified but the subnetwork is specified, the network is inferred.\n\nThis field is optional when creating a firewall rule. If not specified when creating a firewall rule, the default network global/networks/default is used.\n\nIf you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/global/networks/network \n- projects/project/global/networks/network \n- global/networks/default" - }, - "networkIP": { - "type": "string", - "description": "An IPv4 internal network address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system." - }, - "subnetwork": { - "type": "string", - "description": "The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not provide this property. If the network is in auto subnet mode, providing the subnetwork is optional. If the network is in custom subnet mode, then this field should be specified. If you specify this property, you can specify the subnetwork as a full or partial URL. 
For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork \n- regions/region/subnetworks/subnetwork" - } - } - }, - "NetworkList": { - "id": "NetworkList", - "type": "object", - "description": "Contains a list of networks.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "[Output Only] A list of Network resources.", - "items": { - "$ref": "Network" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#networkList for lists of networks.", - "default": "compute#networkList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "NetworkPeering": { - "id": "NetworkPeering", - "type": "object", - "description": "A network peering attached to a network resource. The message includes the peering name, peer network, peering state, and a flag indicating whether Google Compute Engine should automatically create routes for the peering.", - "properties": { - "autoCreateRoutes": { - "type": "boolean", - "description": "Whether full mesh connectivity is created and managed automatically. When it is set to true, Google Compute Engine will automatically create and manage the routes between two networks when the state is ACTIVE. Otherwise, user needs to create routes manually to route packets to peer network." - }, - "name": { - "type": "string", - "description": "Name of this peering. Provided by the client when the peering is created. The name must comply with RFC1035. Specifically, the name must be 1-63 characters long and match regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all the following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash." - }, - "network": { - "type": "string", - "description": "The URL of the peer network. It can be either full URL or partial URL. The peer network may belong to a different project. If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network." - }, - "state": { - "type": "string", - "description": "[Output Only] State for the peering.", - "enum": [ - "ACTIVE", - "INACTIVE" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "stateDetails": { - "type": "string", - "description": "[Output Only] Details about the current state of the peering." - } - } - }, - "NetworksAddPeeringRequest": { - "id": "NetworksAddPeeringRequest", - "type": "object", - "properties": { - "autoCreateRoutes": { - "type": "boolean", - "description": "Whether Google Compute Engine manages the routes automatically." - }, - "name": { - "type": "string", - "description": "Name of the peering, which should conform to RFC1035." - }, - "peerNetwork": { - "type": "string", - "description": "URL of the peer network. It can be either full URL or partial URL. 
The peer network may belong to a different project. If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network." - } - } - }, - "NetworksRemovePeeringRequest": { - "id": "NetworksRemovePeeringRequest", - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "Name of the peering, which should conform to RFC1035." - } - } - }, - "Operation": { - "id": "Operation", - "type": "object", - "description": "An Operation resource, used to manage asynchronous API requests.", - "properties": { - "clientOperationId": { - "type": "string", - "description": "[Output Only] Reserved for future use." - }, - "creationTimestamp": { - "type": "string", - "description": "[Deprecated] This field is deprecated." - }, - "description": { - "type": "string", - "description": "[Output Only] A textual description of the operation, which is set when the operation is created." - }, - "endTime": { - "type": "string", - "description": "[Output Only] The time that this operation was completed. This value is in RFC3339 text format." - }, - "error": { - "type": "object", - "description": "[Output Only] If errors are generated during processing of the operation, this field will be populated.", - "properties": { - "errors": { - "type": "array", - "description": "[Output Only] The array of errors encountered while processing this operation.", - "items": { - "type": "object", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] The error type identifier for this error." - }, - "location": { - "type": "string", - "description": "[Output Only] Indicates the field in the request that caused the error. This property is optional." - }, - "message": { - "type": "string", - "description": "[Output Only] An optional, human-readable error message." - } - } - } - } - } - }, - "httpErrorMessage": { - "type": "string", - "description": "[Output Only] If the operation fails, this field contains the HTTP error message that was returned, such as NOT FOUND." - }, - "httpErrorStatusCode": { - "type": "integer", - "description": "[Output Only] If the operation fails, this field contains the HTTP error status code that was returned. For example, a 404 means the resource was not found.", - "format": "int32" - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "insertTime": { - "type": "string", - "description": "[Output Only] The time that this operation was requested. This value is in RFC3339 text format." - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#operation for Operation resources.", - "default": "compute#operation" - }, - "name": { - "type": "string", - "description": "[Output Only] Name of the resource." - }, - "operationType": { - "type": "string", - "description": "[Output Only] The type of operation, such as insert, update, or delete, and so on." - }, - "progress": { - "type": "integer", - "description": "[Output Only] An optional progress indicator that ranges from 0 to 100. There is no requirement that this be linear or support any granularity of operations. This should not be used to guess when the operation will be complete. 
This number should monotonically increase as the operation progresses.", - "format": "int32" - }, - "region": { - "type": "string", - "description": "[Output Only] The URL of the region where the operation resides. Only available when performing regional operations." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "startTime": { - "type": "string", - "description": "[Output Only] The time that this operation was started by the server. This value is in RFC3339 text format." - }, - "status": { - "type": "string", - "description": "[Output Only] The status of the operation, which can be one of the following: PENDING, RUNNING, or DONE.", - "enum": [ - "DONE", - "PENDING", - "RUNNING" - ], - "enumDescriptions": [ - "", - "", - "" - ] - }, - "statusMessage": { - "type": "string", - "description": "[Output Only] An optional textual description of the current status of the operation." - }, - "targetId": { - "type": "string", - "description": "[Output Only] The unique target ID, which identifies a specific incarnation of the target resource.", - "format": "uint64" - }, - "targetLink": { - "type": "string", - "description": "[Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the persistent disk that the snapshot was created from." - }, - "user": { - "type": "string", - "description": "[Output Only] User who requested the operation, for example: user@example.com." - }, - "warnings": { - "type": "array", - "description": "[Output Only] If warning messages are generated during processing of the operation, this field will be populated.", - "items": { - "type": "object", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." 
- } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." - } - } - } - }, - "zone": { - "type": "string", - "description": "[Output Only] The URL of the zone where the operation resides. Only available when performing per-zone operations." - } - } - }, - "OperationAggregatedList": { - "id": "OperationAggregatedList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "object", - "description": "[Output Only] A map of scoped operation lists.", - "additionalProperties": { - "$ref": "OperationsScopedList", - "description": "[Output Only] Name of the scope containing this set of operations." - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#operationAggregatedList for aggregated lists of operations.", - "default": "compute#operationAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "OperationList": { - "id": "OperationList", - "type": "object", - "description": "Contains a list of Operation resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "[Output Only] A list of Operation resources.", - "items": { - "$ref": "Operation" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#operations for Operations resource.", - "default": "compute#operationList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "OperationsScopedList": { - "id": "OperationsScopedList", - "type": "object", - "properties": { - "operations": { - "type": "array", - "description": "[Output Only] List of operations contained in this scope.", - "items": { - "$ref": "Operation" - } - }, - "warning": { - "type": "object", - "description": "[Output Only] Informational warning which replaces the list of operations when the list is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." - } - } - } - } - }, - "PathMatcher": { - "id": "PathMatcher", - "type": "object", - "description": "A matcher for the path portion of the URL. The BackendService from the longest-matched rule will serve the URL. If no rule was matched, the default service will be used.", - "properties": { - "defaultService": { - "type": "string", - "description": "The full or partial URL to the BackendService resource. This will be used if none of the pathRules defined by this PathMatcher is matched by the URL's path portion. For example, the following are all valid URLs to a BackendService resource: \n- https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService \n- compute/v1/projects/project/global/backendServices/backendService \n- global/backendServices/backendService" - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "name": { - "type": "string", - "description": "The name to which this PathMatcher is referred by the HostRule." - }, - "pathRules": { - "type": "array", - "description": "The list of path rules.", - "items": { - "$ref": "PathRule" - } - } - } - }, - "PathRule": { - "id": "PathRule", - "type": "object", - "description": "A path-matching rule for a URL. If matched, will use the specified BackendService to handle the traffic arriving at this URL.", - "properties": { - "paths": { - "type": "array", - "description": "The list of path patterns to match. Each must start with / and the only place a * is allowed is at the end following a /. The string fed to the path matcher does not include any text after the first ? 
or #, and those chars are not allowed here.", - "items": { - "type": "string" - } - }, - "service": { - "type": "string", - "description": "The URL of the BackendService resource if this rule is matched." - } - } - }, - "Policy": { - "id": "Policy", - "type": "object", - "description": "Defines an Identity and Access Management (IAM) policy. It is used to specify access control policies for Cloud Platform resources.\n\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of `members` to a `role`, where the members can be user accounts, Google groups, Google domains, and service accounts. A `role` is a named list of permissions defined by IAM.\n\n**Example**\n\n{ \"bindings\": [ { \"role\": \"roles/owner\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-other-app@appspot.gserviceaccount.com\", ] }, { \"role\": \"roles/viewer\", \"members\": [\"user:sean@example.com\"] } ] }\n\nFor a description of IAM and its features, see the [IAM developer's guide](https://cloud.google.com/iam).", - "properties": { - "auditConfigs": { - "type": "array", - "description": "Specifies cloud audit logging configuration for this policy.", - "items": { - "$ref": "AuditConfig" - } - }, - "bindings": { - "type": "array", - "description": "Associates a list of `members` to a `role`. Multiple `bindings` must not be specified for the same `role`. `bindings` with no members will result in an error.", - "items": { - "$ref": "Binding" - } - }, - "etag": { - "type": "string", - "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing policy is overwritten blindly.", - "format": "byte" - }, - "iamOwned": { - "type": "boolean", - "description": "" - }, - "rules": { - "type": "array", - "description": "If more than one rule is specified, the rules are applied in the following manner: - All matching LOG rules are always applied. - If any DENY/DENY_WITH_LOG rule matches, permission is denied. Logging will be applied if one or more matching rule requires logging. - Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is granted. Logging will be applied if one or more matching rule requires logging. - Otherwise, if no rule applies, permission is denied.", - "items": { - "$ref": "Rule" - } - }, - "version": { - "type": "integer", - "description": "Version of the `Policy`. The default version is 0.", - "format": "int32" - } - } - }, - "Project": { - "id": "Project", - "type": "object", - "description": "A Project resource. Projects can only be created in the Google Cloud Platform Console. Unless marked otherwise, values can only be modified in the console.", - "properties": { - "commonInstanceMetadata": { - "$ref": "Metadata", - "description": "Metadata key/value pairs available to all instances contained in this project. See Custom metadata for more information." - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." 
- }, - "defaultServiceAccount": { - "type": "string", - "description": "[Output Only] Default service account used by VMs running in this project." - }, - "description": { - "type": "string", - "description": "An optional textual description of the resource." - }, - "enabledFeatures": { - "type": "array", - "description": "Restricted features enabled for use on this project.", - "items": { - "type": "string" - } - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server. This is not the project ID, and is just a unique ID used by Compute Engine to identify resources.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#project for projects.", - "default": "compute#project" - }, - "name": { - "type": "string", - "description": "The project ID. For example: my-example-project. Use the project ID to make requests to Compute Engine." - }, - "quotas": { - "type": "array", - "description": "[Output Only] Quotas assigned to this project.", - "items": { - "$ref": "Quota" - } - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "usageExportLocation": { - "$ref": "UsageExportLocation", - "description": "The naming prefix for daily usage reports and the Google Cloud Storage bucket where they are stored." - }, - "xpnProjectStatus": { - "type": "string", - "description": "[Output Only] The role this project has in a Cross Project Network (XPN) configuration. Currently only HOST projects are differentiated.", - "enum": [ - "HOST", - "UNSPECIFIED_XPN_PROJECT_STATUS" - ], - "enumDescriptions": [ - "", - "" - ] - } - } - }, - "ProjectsDisableXpnResourceRequest": { - "id": "ProjectsDisableXpnResourceRequest", - "type": "object", - "properties": { - "xpnResource": { - "$ref": "XpnResourceId", - "description": "XPN resource ID." - } - } - }, - "ProjectsEnableXpnResourceRequest": { - "id": "ProjectsEnableXpnResourceRequest", - "type": "object", - "properties": { - "xpnResource": { - "$ref": "XpnResourceId", - "description": "XPN resource ID." - } - } - }, - "ProjectsGetXpnResources": { - "id": "ProjectsGetXpnResources", - "type": "object", - "properties": { - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#projectsGetXpnResources for lists of XPN resources.", - "default": "compute#projectsGetXpnResources" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "resources": { - "type": "array", - "description": "XPN resources attached to this project as their XPN host.", - "items": { - "$ref": "XpnResourceId" - } - } - } - }, - "ProjectsListXpnHostsRequest": { - "id": "ProjectsListXpnHostsRequest", - "type": "object", - "properties": { - "organization": { - "type": "string", - "description": "Optional organization ID managed by Cloud Resource Manager, for which to list XPN host projects. If not specified, the organization will be inferred from the project." 
- } - } - }, - "Quota": { - "id": "Quota", - "type": "object", - "description": "A quotas entry.", - "properties": { - "limit": { - "type": "number", - "description": "[Output Only] Quota limit for this metric.", - "format": "double" - }, - "metric": { - "type": "string", - "description": "[Output Only] Name of the quota metric.", - "enum": [ - "AUTOSCALERS", - "BACKEND_BUCKETS", - "BACKEND_SERVICES", - "COMMITMENTS", - "CPUS", - "CPUS_ALL_REGIONS", - "DISKS_TOTAL_GB", - "FIREWALLS", - "FORWARDING_RULES", - "HEALTH_CHECKS", - "IMAGES", - "INSTANCES", - "INSTANCE_GROUPS", - "INSTANCE_GROUP_MANAGERS", - "INSTANCE_TEMPLATES", - "IN_USE_ADDRESSES", - "LOCAL_SSD_TOTAL_GB", - "NETWORKS", - "NVIDIA_K80_GPUS", - "PREEMPTIBLE_CPUS", - "REGIONAL_AUTOSCALERS", - "REGIONAL_INSTANCE_GROUP_MANAGERS", - "ROUTERS", - "ROUTES", - "SNAPSHOTS", - "SSD_TOTAL_GB", - "SSL_CERTIFICATES", - "STATIC_ADDRESSES", - "SUBNETWORKS", - "TARGET_HTTPS_PROXIES", - "TARGET_HTTP_PROXIES", - "TARGET_INSTANCES", - "TARGET_POOLS", - "TARGET_SSL_PROXIES", - "TARGET_VPN_GATEWAYS", - "URL_MAPS", - "VPN_TUNNELS" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "usage": { - "type": "number", - "description": "[Output Only] Current usage of this metric.", - "format": "double" - } - } - }, - "Reference": { - "id": "Reference", - "type": "object", - "description": "Represents a reference to a resource.", - "properties": { - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#reference for references.", - "default": "compute#reference" - }, - "referenceType": { - "type": "string", - "description": "A description of the reference type with no implied semantics. Possible values include: \n- MEMBER_OF" - }, - "referrer": { - "type": "string", - "description": "URL of the resource which refers to the target." - }, - "target": { - "type": "string", - "description": "URL of the resource to which this reference points." - } - } - }, - "Region": { - "id": "Region", - "type": "object", - "description": "Region resource.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "deprecated": { - "$ref": "DeprecationStatus", - "description": "[Output Only] The deprecation status associated with this region." - }, - "description": { - "type": "string", - "description": "[Output Only] Textual description of the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#region for regions.", - "default": "compute#region" - }, - "name": { - "type": "string", - "description": "[Output Only] Name of the resource." - }, - "quotas": { - "type": "array", - "description": "[Output Only] Quotas assigned to this region.", - "items": { - "$ref": "Quota" - } - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." 
- }, - "status": { - "type": "string", - "description": "[Output Only] Status of the region, either UP or DOWN.", - "enum": [ - "DOWN", - "UP" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "zones": { - "type": "array", - "description": "[Output Only] A list of zones available in this region, in the form of resource URLs.", - "items": { - "type": "string" - } - } - } - }, - "RegionAutoscalerList": { - "id": "RegionAutoscalerList", - "type": "object", - "description": "Contains a list of autoscalers.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "A list of autoscalers.", - "items": { - "$ref": "Autoscaler" - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#regionAutoscalerList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] A token used to continue a truncated list request." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "RegionInstanceGroupList": { - "id": "RegionInstanceGroupList", - "type": "object", - "description": "Contains a list of InstanceGroup resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "A list of InstanceGroup resources.", - "items": { - "$ref": "InstanceGroup" - } - }, - "kind": { - "type": "string", - "description": "The resource type.", - "default": "compute#regionInstanceGroupList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] The URL for this resource type. The server generates this URL." - } - } - }, - "RegionInstanceGroupManagerList": { - "id": "RegionInstanceGroupManagerList", - "type": "object", - "description": "Contains a list of managed instance groups.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "A list of managed instance groups.", - "items": { - "$ref": "InstanceGroupManager" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerList for a list of managed instance groups that exist in th regional scope.", - "default": "compute#regionInstanceGroupManagerList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output only] A token used to continue a truncated list request." - }, - "selfLink": { - "type": "string", - "description": "[Output only] The URL for this resource type. The server generates this URL." 
- } - } - }, - "RegionInstanceGroupManagersAbandonInstancesRequest": { - "id": "RegionInstanceGroupManagersAbandonInstancesRequest", - "type": "object", - "properties": { - "instances": { - "type": "array", - "description": "The URLs of one or more instances to abandon. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", - "items": { - "type": "string" - } - } - } - }, - "RegionInstanceGroupManagersDeleteInstancesRequest": { - "id": "RegionInstanceGroupManagersDeleteInstancesRequest", - "type": "object", - "properties": { - "instances": { - "type": "array", - "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", - "items": { - "type": "string" - } - } - } - }, - "RegionInstanceGroupManagersListInstancesResponse": { - "id": "RegionInstanceGroupManagersListInstancesResponse", - "type": "object", - "properties": { - "managedInstances": { - "type": "array", - "description": "List of managed instances.", - "items": { - "$ref": "ManagedInstance" - } - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - } - } - }, - "RegionInstanceGroupManagersRecreateRequest": { - "id": "RegionInstanceGroupManagersRecreateRequest", - "type": "object", - "properties": { - "instances": { - "type": "array", - "description": "The URLs of one or more instances to recreate. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", - "items": { - "type": "string" - } - } - } - }, - "RegionInstanceGroupManagersSetAutoHealingRequest": { - "id": "RegionInstanceGroupManagersSetAutoHealingRequest", - "type": "object", - "properties": { - "autoHealingPolicies": { - "type": "array", - "items": { - "$ref": "InstanceGroupManagerAutoHealingPolicy" - } - } - } - }, - "RegionInstanceGroupManagersSetTargetPoolsRequest": { - "id": "RegionInstanceGroupManagersSetTargetPoolsRequest", - "type": "object", - "properties": { - "fingerprint": { - "type": "string", - "description": "Fingerprint of the target pools information, which is a hash of the contents. This field is used for optimistic locking when you update the target pool entries. This field is optional.", - "format": "byte" - }, - "targetPools": { - "type": "array", - "description": "The URL of all TargetPool resources to which instances in the instanceGroup field are added. The target pools automatically apply to all of the instances in the managed instance group.", - "items": { - "type": "string" - } - } - } - }, - "RegionInstanceGroupManagersSetTemplateRequest": { - "id": "RegionInstanceGroupManagersSetTemplateRequest", - "type": "object", - "properties": { - "instanceTemplate": { - "type": "string", - "description": "URL of the InstanceTemplate resource from which all new instances will be created." - } - } - }, - "RegionInstanceGroupsListInstances": { - "id": "RegionInstanceGroupsListInstances", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource. Defined by the server." 
- }, - "items": { - "type": "array", - "description": "A list of instances and any named ports that are assigned to those instances.", - "items": { - "$ref": "InstanceWithNamedPorts" - } - }, - "kind": { - "type": "string", - "description": "The resource type.", - "default": "compute#regionInstanceGroupsListInstances" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - } - } - }, - "RegionInstanceGroupsListInstancesRequest": { - "id": "RegionInstanceGroupsListInstancesRequest", - "type": "object", - "properties": { - "instanceState": { - "type": "string", - "description": "Instances in which state should be returned. Valid options are: 'ALL', 'RUNNING'. By default, it lists all instances.", - "enum": [ - "ALL", - "RUNNING" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "portName": { - "type": "string", - "description": "Name of port user is interested in. It is optional. If it is set, only information about this ports will be returned. If it is not set, all the named ports will be returned. Always lists all instances.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - } - } - }, - "RegionInstanceGroupsSetNamedPortsRequest": { - "id": "RegionInstanceGroupsSetNamedPortsRequest", - "type": "object", - "properties": { - "fingerprint": { - "type": "string", - "description": "The fingerprint of the named ports information for this instance group. Use this optional property to prevent conflicts when multiple users change the named ports settings concurrently. Obtain the fingerprint with the instanceGroups.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request.", - "format": "byte" - }, - "namedPorts": { - "type": "array", - "description": "The list of named ports to set for this instance group.", - "items": { - "$ref": "NamedPort" - } - } - } - }, - "RegionList": { - "id": "RegionList", - "type": "object", - "description": "Contains a list of region resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "[Output Only] A list of Region resources.", - "items": { - "$ref": "Region" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#regionList for lists of regions.", - "default": "compute#regionList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." 
- } - } - }, - "ResourceCommitment": { - "id": "ResourceCommitment", - "type": "object", - "description": "Commitment for a particular resource (a Commitment is composed of one or more of these).", - "properties": { - "amount": { - "type": "string", - "description": "The amount of the resource purchased (in a type-dependent unit, such as bytes).", - "format": "int64" - }, - "type": { - "type": "string", - "description": "Type of resource for which this commitment applies.", - "enum": [ - "MEMORY", - "UNSPECIFIED", - "VCPU" - ], - "enumDescriptions": [ - "", - "", - "" - ] - } - } - }, - "ResourceGroupReference": { - "id": "ResourceGroupReference", - "type": "object", - "properties": { - "group": { - "type": "string", - "description": "A URI referencing one of the instance groups listed in the backend service." - } - } - }, - "Route": { - "id": "Route", - "type": "object", - "description": "Represents a Route resource. A route specifies how certain packets should be handled by the network. Routes are associated with instances by tags and the set of routes for a particular instance is called its routing table.\n\nFor each packet leaving a instance, the system searches that instance's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the nextHop field of the winning route - either to another instance destination, an instance gateway, or a Google Compute Engine-operated gateway.\n\nPackets that do not match any route in the sending instance's routing table are dropped.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "destRange": { - "type": "string", - "description": "The destination range of outgoing packets that this route applies to. Only IPv4 is supported.", - "annotations": { - "required": [ - "compute.routes.insert" - ] - } - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of this resource. Always compute#routes for Route resources.", - "default": "compute#route" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.routes.insert" - ] - } - }, - "network": { - "type": "string", - "description": "Fully-qualified URL of the network that this route applies to.", - "annotations": { - "required": [ - "compute.routes.insert" - ] - } - }, - "nextHopGateway": { - "type": "string", - "description": "The URL to a gateway that should handle matching packets. You can only specify the internet gateway using a full or partial valid URL: projects/\u003cproject-id\u003e/global/gateways/default-internet-gateway" - }, - "nextHopInstance": { - "type": "string", - "description": "The URL to an instance that should handle matching packets. You can specify this as a full or partial URL. For example:\nhttps://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/" - }, - "nextHopIp": { - "type": "string", - "description": "The network IP address of an instance that should handle matching packets. Only IPv4 is supported." - }, - "nextHopNetwork": { - "type": "string", - "description": "The URL of the local network if it should handle matching packets." - }, - "nextHopPeering": { - "type": "string", - "description": "[Output Only] The network peering name that should handle matching packets, which should conform to RFC1035." - }, - "nextHopVpnTunnel": { - "type": "string", - "description": "The URL to a VpnTunnel that should handle matching packets." - }, - "priority": { - "type": "integer", - "description": "The priority of this route. Priority is used to break ties in cases where there is more than one matching route of equal prefix length. In the case of two routes with equal prefix length, the one with the lowest-numbered priority value wins. Default value is 1000. Valid range is 0 through 65535.", - "format": "uint32", - "annotations": { - "required": [ - "compute.routes.insert" - ] - } - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined fully-qualified URL for this resource." - }, - "tags": { - "type": "array", - "description": "A list of instance tags to which this route applies.", - "items": { - "type": "string" - }, - "annotations": { - "required": [ - "compute.routes.insert" - ] - } - }, - "warnings": { - "type": "array", - "description": "[Output Only] If potential misconfigurations are detected for this route, this field will be populated with warning messages.", - "items": { - "type": "object", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." - } - } - } - } - } - }, - "RouteList": { - "id": "RouteList", - "type": "object", - "description": "Contains a list of Route resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource. Defined by the server." - }, - "items": { - "type": "array", - "description": "[Output Only] A list of Route resources.", - "items": { - "$ref": "Route" - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#routeList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "Router": { - "id": "Router", - "type": "object", - "description": "Router resource.", - "properties": { - "bgp": { - "$ref": "RouterBgp", - "description": "BGP information specific to this router." - }, - "bgpPeers": { - "type": "array", - "description": "BGP information that needs to be configured into the routing stack to establish the BGP peering. It must specify peer ASN and either interface name, IP, or peer IP. Please refer to RFC4273.", - "items": { - "$ref": "RouterBgpPeer" - } - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." 
- }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "interfaces": { - "type": "array", - "description": "Router interfaces. Each interface requires either one linked resource (e.g. linkedVpnTunnel), or IP address and IP address range (e.g. ipRange), or both.", - "items": { - "$ref": "RouterInterface" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#router for routers.", - "default": "compute#router" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.routers.insert" - ] - } - }, - "network": { - "type": "string", - "description": "URI of the network to which this router belongs.", - "annotations": { - "required": [ - "compute.routers.insert" - ] - } - }, - "region": { - "type": "string", - "description": "[Output Only] URI of the region where the router resides." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - } - } - }, - "RouterAggregatedList": { - "id": "RouterAggregatedList", - "type": "object", - "description": "Contains a list of routers.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "object", - "description": "A map of scoped router lists.", - "additionalProperties": { - "$ref": "RoutersScopedList", - "description": "Name of the scope containing this set of routers." - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#routerAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "RouterBgp": { - "id": "RouterBgp", - "type": "object", - "properties": { - "asn": { - "type": "integer", - "description": "Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN, either 16-bit or 32-bit. The value will be fixed for this router resource. All VPN tunnels that link to this router will have the same local ASN.", - "format": "uint32" - } - } - }, - "RouterBgpPeer": { - "id": "RouterBgpPeer", - "type": "object", - "properties": { - "advertisedRoutePriority": { - "type": "integer", - "description": "The priority of routes advertised to this BGP peer. 
In the case where there is more than one matching route of maximum length, the routes with lowest priority value win.", - "format": "uint32" - }, - "interfaceName": { - "type": "string", - "description": "Name of the interface the BGP peer is associated with." - }, - "ipAddress": { - "type": "string", - "description": "IP address of the interface inside Google Cloud Platform. Only IPv4 is supported." - }, - "name": { - "type": "string", - "description": "Name of this BGP peer. The name must be 1-63 characters long and comply with RFC1035.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "peerAsn": { - "type": "integer", - "description": "Peer BGP Autonomous System Number (ASN). For VPN use case, this value can be different for every tunnel.", - "format": "uint32" - }, - "peerIpAddress": { - "type": "string", - "description": "IP address of the BGP interface outside Google cloud. Only IPv4 is supported." - } - } - }, - "RouterInterface": { - "id": "RouterInterface", - "type": "object", - "properties": { - "ipRange": { - "type": "string", - "description": "IP address and range of the interface. The IP range must be in the RFC3927 link-local IP space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface." - }, - "linkedVpnTunnel": { - "type": "string", - "description": "URI of the linked VPN tunnel. It must be in the same region as the router. Each interface can have at most one linked resource and it could either be a VPN Tunnel or an interconnect attachment." - }, - "name": { - "type": "string", - "description": "Name of this interface entry. The name must be 1-63 characters long and comply with RFC1035.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - } - } - }, - "RouterList": { - "id": "RouterList", - "type": "object", - "description": "Contains a list of Router resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "A list of Router resources.", - "items": { - "$ref": "Router" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#router for routers.", - "default": "compute#routerList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "RouterStatus": { - "id": "RouterStatus", - "type": "object", - "properties": { - "bestRoutes": { - "type": "array", - "description": "Best routes for this router's network.", - "items": { - "$ref": "Route" - } - }, - "bestRoutesForRouter": { - "type": "array", - "description": "Best routes learned by this router.", - "items": { - "$ref": "Route" - } - }, - "bgpPeerStatus": { - "type": "array", - "items": { - "$ref": "RouterStatusBgpPeerStatus" - } - }, - "network": { - "type": "string", - "description": "URI of the network to which this router belongs." 
- } - } - }, - "RouterStatusBgpPeerStatus": { - "id": "RouterStatusBgpPeerStatus", - "type": "object", - "properties": { - "advertisedRoutes": { - "type": "array", - "description": "Routes that were advertised to the remote BGP peer", - "items": { - "$ref": "Route" - } - }, - "ipAddress": { - "type": "string", - "description": "IP address of the local BGP interface." - }, - "linkedVpnTunnel": { - "type": "string", - "description": "URL of the VPN tunnel that this BGP peer controls." - }, - "name": { - "type": "string", - "description": "Name of this BGP peer. Unique within the Routers resource." - }, - "numLearnedRoutes": { - "type": "integer", - "description": "Number of routes learned from the remote BGP Peer.", - "format": "uint32" - }, - "peerIpAddress": { - "type": "string", - "description": "IP address of the remote BGP interface." - }, - "state": { - "type": "string", - "description": "BGP state as specified in RFC1771." - }, - "status": { - "type": "string", - "description": "Status of the BGP peer: {UP, DOWN}", - "enum": [ - "DOWN", - "UNKNOWN", - "UP" - ], - "enumDescriptions": [ - "", - "", - "" - ] - }, - "uptime": { - "type": "string", - "description": "Time this session has been up. Format: 14 years, 51 weeks, 6 days, 23 hours, 59 minutes, 59 seconds" - }, - "uptimeSeconds": { - "type": "string", - "description": "Time this session has been up, in seconds. Format: 145" - } - } - }, - "RouterStatusResponse": { - "id": "RouterStatusResponse", - "type": "object", - "properties": { - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#routerStatusResponse" - }, - "result": { - "$ref": "RouterStatus" - } - } - }, - "RoutersPreviewResponse": { - "id": "RoutersPreviewResponse", - "type": "object", - "properties": { - "resource": { - "$ref": "Router", - "description": "Preview of given router." - } - } - }, - "RoutersScopedList": { - "id": "RoutersScopedList", - "type": "object", - "properties": { - "routers": { - "type": "array", - "description": "List of routers contained in this scope.", - "items": { - "$ref": "Router" - } - }, - "warning": { - "type": "object", - "description": "Informational warning which replaces the list of routers when the list is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." - } - } - } - } - }, - "Rule": { - "id": "Rule", - "type": "object", - "description": "A rule to be applied in a Policy.", - "properties": { - "action": { - "type": "string", - "description": "Required", - "enum": [ - "ALLOW", - "ALLOW_WITH_LOG", - "DENY", - "DENY_WITH_LOG", - "LOG", - "NO_ACTION" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "" - ] - }, - "conditions": { - "type": "array", - "description": "Additional restrictions that must be met", - "items": { - "$ref": "Condition" - } - }, - "description": { - "type": "string", - "description": "Human-readable description of the rule." - }, - "ins": { - "type": "array", - "description": "If one or more 'in' clauses are specified, the rule matches if the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.", - "items": { - "type": "string" - } - }, - "logConfigs": { - "type": "array", - "description": "The config returned to callers of tech.iam.IAM.CheckPolicy for any entries that match the LOG action.", - "items": { - "$ref": "LogConfig" - } - }, - "notIns": { - "type": "array", - "description": "If one or more 'not_in' clauses are specified, the rule matches if the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries.", - "items": { - "type": "string" - } - }, - "permissions": { - "type": "array", - "description": "A permission is a string of form '..' (e.g., 'storage.buckets.list'). A value of '*' matches all permissions, and a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs.", - "items": { - "type": "string" - } - } - } - }, - "SSLHealthCheck": { - "id": "SSLHealthCheck", - "type": "object", - "properties": { - "port": { - "type": "integer", - "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.", - "format": "int32" - }, - "portName": { - "type": "string", - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." - }, - "proxyHeader": { - "type": "string", - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "request": { - "type": "string", - "description": "The application data to send once the SSL connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII." - }, - "response": { - "type": "string", - "description": "The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII." 
- } - } - }, - "Scheduling": { - "id": "Scheduling", - "type": "object", - "description": "Sets the scheduling options for an Instance.", - "properties": { - "automaticRestart": { - "type": "boolean", - "description": "Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted.\n\nBy default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine." - }, - "onHostMaintenance": { - "type": "string", - "description": "Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Setting Instance Scheduling Options.", - "enum": [ - "MIGRATE", - "TERMINATE" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "preemptible": { - "type": "boolean", - "description": "Defines whether the instance is preemptible. This can only be set during instance creation, it cannot be set or changed after the instance has been created." - } - } - }, - "SerialPortOutput": { - "id": "SerialPortOutput", - "type": "object", - "description": "An instance's serial console output.", - "properties": { - "contents": { - "type": "string", - "description": "[Output Only] The contents of the console output." - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#serialPortOutput for serial port output.", - "default": "compute#serialPortOutput" - }, - "next": { - "type": "string", - "description": "[Output Only] The position of the next byte of content from the serial console output. Use this value in the next request as the start parameter.", - "format": "int64" - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - }, - "start": { - "type": "string", - "description": "The starting byte position of the output that was returned. This should match the start parameter sent with the request. If the serial console output exceeds the size of the buffer, older output will be overwritten by newer content and the start values will be mismatched.", - "format": "int64" - } - } - }, - "ServiceAccount": { - "id": "ServiceAccount", - "type": "object", - "description": "A service account.", - "properties": { - "email": { - "type": "string", - "description": "Email address of the service account." - }, - "scopes": { - "type": "array", - "description": "The list of scopes to be made available for this service account.", - "items": { - "type": "string" - } - } - } - }, - "Snapshot": { - "id": "Snapshot", - "type": "object", - "description": "A persistent disk snapshot resource.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "diskSizeGb": { - "type": "string", - "description": "[Output Only] Size of the snapshot, specified in GB.", - "format": "int64" - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#snapshot for Snapshot resources.", - "default": "compute#snapshot" - }, - "labelFingerprint": { - "type": "string", - "description": "A fingerprint for the labels being applied to this snapshot, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels.\n\nTo see the latest fingerprint, make a get() request to retrieve a snapshot.", - "format": "byte" - }, - "labels": { - "type": "object", - "description": "Labels to apply to this snapshot. These can be later modified by the setLabels method. Label values may be empty.", - "additionalProperties": { - "type": "string" - } - }, - "licenses": { - "type": "array", - "description": "[Output Only] A list of public visible licenses that apply to this snapshot. This can be because the original image had licenses attached (such as a Windows image).", - "items": { - "type": "string" - } - }, - "name": { - "type": "string", - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "snapshotEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "Encrypts the snapshot using a customer-supplied encryption key.\n\nAfter you encrypt a snapshot using a customer-supplied key, you must provide the same key if you use the image later For example, you must provide the encryption key when you create a disk from the encrypted snapshot in a future request.\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the snapshot, then the snapshot will be encrypted using an automatically generated key and you do not need to provide a key to use the snapshot later." - }, - "sourceDisk": { - "type": "string", - "description": "[Output Only] The source disk used to create this snapshot." - }, - "sourceDiskEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key." - }, - "sourceDiskId": { - "type": "string", - "description": "[Output Only] The ID value of the disk used to create this snapshot. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name." - }, - "status": { - "type": "string", - "description": "[Output Only] The status of the snapshot. 
This can be CREATING, DELETING, FAILED, READY, or UPLOADING.", - "enum": [ - "CREATING", - "DELETING", - "FAILED", - "READY", - "UPLOADING" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ] - }, - "storageBytes": { - "type": "string", - "description": "[Output Only] A size of the the storage used by the snapshot. As snapshots share storage, this number is expected to change with snapshot creation/deletion.", - "format": "int64" - }, - "storageBytesStatus": { - "type": "string", - "description": "[Output Only] An indicator whether storageBytes is in a stable state or it is being adjusted as a result of shared storage reallocation. This status can either be UPDATING, meaning the size of the snapshot is being updated, or UP_TO_DATE, meaning the size of the snapshot is up-to-date.", - "enum": [ - "UPDATING", - "UP_TO_DATE" - ], - "enumDescriptions": [ - "", - "" - ] - } - } - }, - "SnapshotList": { - "id": "SnapshotList", - "type": "object", - "description": "Contains a list of Snapshot resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "[Output Only] A list of Snapshot resources.", - "items": { - "$ref": "Snapshot" - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#snapshotList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "SslCertificate": { - "id": "SslCertificate", - "type": "object", - "description": "An SslCertificate resource. This resource provides a mechanism to upload an SSL key and certificate to the load balancer to serve secure connections from the user.", - "properties": { - "certificate": { - "type": "string", - "description": "A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert." - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#sslCertificate for SSL certificates.", - "default": "compute#sslCertificate" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "privateKey": { - "type": "string", - "description": "A write-only private key in PEM format. Only insert requests will include this field." - }, - "selfLink": { - "type": "string", - "description": "[Output only] Server-defined URL for the resource." - } - } - }, - "SslCertificateList": { - "id": "SslCertificateList", - "type": "object", - "description": "Contains a list of SslCertificate resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource. Defined by the server." - }, - "items": { - "type": "array", - "description": "A list of SslCertificate resources.", - "items": { - "$ref": "SslCertificate" - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#sslCertificateList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "Subnetwork": { - "id": "Subnetwork", - "type": "object", - "description": "A Subnetwork resource.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "gatewayAddress": { - "type": "string", - "description": "[Output Only] The gateway address for default routes to reach destination addresses outside this subnetwork." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "ipCidrRange": { - "type": "string", - "description": "The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported." - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#subnetwork for Subnetwork resources.", - "default": "compute#subnetwork" - }, - "name": { - "type": "string", - "description": "The name of the resource, provided by the client when initially creating the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "network": { - "type": "string", - "description": "The URL of the network to which this subnetwork belongs, provided by the client when initially creating the subnetwork. 
Only networks that are in the distributed mode can have subnetworks." - }, - "privateIpGoogleAccess": { - "type": "boolean", - "description": "Whether the VMs in this subnet can access Google services without assigned external IP addresses." - }, - "region": { - "type": "string", - "description": "URL of the region where the Subnetwork resides." - }, - "secondaryIpRanges": { - "type": "array", - "description": "An array of configurations for secondary IP ranges for VM instances contained in this subnetwork. The primary IP of such VM must belong to the primary ipCidrRange of the subnetwork. The alias IPs may belong to either primary or secondary ranges.", - "items": { - "$ref": "SubnetworkSecondaryRange" - } - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - } - } - }, - "SubnetworkAggregatedList": { - "id": "SubnetworkAggregatedList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "object", - "description": "[Output] A map of scoped Subnetwork lists.", - "additionalProperties": { - "$ref": "SubnetworksScopedList", - "description": "Name of the scope containing this set of Subnetworks." - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#subnetworkAggregatedList for aggregated lists of subnetworks.", - "default": "compute#subnetworkAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "SubnetworkList": { - "id": "SubnetworkList", - "type": "object", - "description": "Contains a list of Subnetwork resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "The Subnetwork resources.", - "items": { - "$ref": "Subnetwork" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#subnetworkList for lists of subnetworks.", - "default": "compute#subnetworkList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "SubnetworkSecondaryRange": { - "id": "SubnetworkSecondaryRange", - "type": "object", - "description": "Represents a secondary IP range of a subnetwork.", - "properties": { - "ipCidrRange": { - "type": "string", - "description": "The range of IP addresses belonging to this subnetwork secondary range. Provide this property when you create the subnetwork. 
Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported." - }, - "rangeName": { - "type": "string", - "description": "The name associated with this subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork." - } - } - }, - "SubnetworksExpandIpCidrRangeRequest": { - "id": "SubnetworksExpandIpCidrRangeRequest", - "type": "object", - "properties": { - "ipCidrRange": { - "type": "string", - "description": "The IP (in CIDR format or netmask) of internal addresses that are legal on this Subnetwork. This range should be disjoint from other subnetworks within this network. This range can only be larger than (i.e. a superset of) the range previously defined before the update." - } - } - }, - "SubnetworksScopedList": { - "id": "SubnetworksScopedList", - "type": "object", - "properties": { - "subnetworks": { - "type": "array", - "description": "List of subnetworks contained in this scope.", - "items": { - "$ref": "Subnetwork" - } - }, - "warning": { - "type": "object", - "description": "An informational warning that appears when the list of addresses is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." 
- } - } - } - } - }, - "SubnetworksSetPrivateIpGoogleAccessRequest": { - "id": "SubnetworksSetPrivateIpGoogleAccessRequest", - "type": "object", - "properties": { - "privateIpGoogleAccess": { - "type": "boolean" - } - } - }, - "TCPHealthCheck": { - "id": "TCPHealthCheck", - "type": "object", - "properties": { - "port": { - "type": "integer", - "description": "The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.", - "format": "int32" - }, - "portName": { - "type": "string", - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." - }, - "proxyHeader": { - "type": "string", - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "request": { - "type": "string", - "description": "The application data to send once the TCP connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII." - }, - "response": { - "type": "string", - "description": "The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII." - } - } - }, - "Tags": { - "id": "Tags", - "type": "object", - "description": "A set of instance tags.", - "properties": { - "fingerprint": { - "type": "string", - "description": "Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata.\n\nTo see the latest fingerprint, make get() request to the instance.", - "format": "byte" - }, - "items": { - "type": "array", - "description": "An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035.", - "items": { - "type": "string" - } - } - } - }, - "TargetHttpProxy": { - "id": "TargetHttpProxy", - "type": "object", - "description": "A TargetHttpProxy resource. This resource defines an HTTP proxy.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetHttpProxy for target HTTP proxies.", - "default": "compute#targetHttpProxy" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "urlMap": { - "type": "string", - "description": "URL to the UrlMap resource that defines the mapping from URL to the BackendService." - } - } - }, - "TargetHttpProxyList": { - "id": "TargetHttpProxyList", - "type": "object", - "description": "A list of TargetHttpProxy resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "A list of TargetHttpProxy resources.", - "items": { - "$ref": "TargetHttpProxy" - } - }, - "kind": { - "type": "string", - "description": "Type of resource. Always compute#targetHttpProxyList for lists of target HTTP proxies.", - "default": "compute#targetHttpProxyList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "TargetHttpsProxiesSetSslCertificatesRequest": { - "id": "TargetHttpsProxiesSetSslCertificatesRequest", - "type": "object", - "properties": { - "sslCertificates": { - "type": "array", - "description": "New set of SslCertificate resources to associate with this TargetHttpsProxy resource. Currently exactly one SslCertificate resource must be specified.", - "items": { - "type": "string" - } - } - } - }, - "TargetHttpsProxy": { - "id": "TargetHttpsProxy", - "type": "object", - "description": "A TargetHttpsProxy resource. This resource defines an HTTPS proxy.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetHttpsProxy for target HTTPS proxies.", - "default": "compute#targetHttpsProxy" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." 
- }, - "sslCertificates": { - "type": "array", - "description": "URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. Currently, exactly one SSL certificate must be specified.", - "items": { - "type": "string" - } - }, - "urlMap": { - "type": "string", - "description": "A fully-qualified or valid partial URL to the UrlMap resource that defines the mapping from URL to the BackendService. For example, the following are all valid URLs for specifying a URL map: \n- https://www.googleapis.compute/v1/projects/project/global/urlMaps/url-map \n- projects/project/global/urlMaps/url-map \n- global/urlMaps/url-map" - } - } - }, - "TargetHttpsProxyList": { - "id": "TargetHttpsProxyList", - "type": "object", - "description": "Contains a list of TargetHttpsProxy resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "A list of TargetHttpsProxy resources.", - "items": { - "$ref": "TargetHttpsProxy" - } - }, - "kind": { - "type": "string", - "description": "Type of resource. Always compute#targetHttpsProxyList for lists of target HTTPS proxies.", - "default": "compute#targetHttpsProxyList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "TargetInstance": { - "id": "TargetInstance", - "type": "object", - "description": "A TargetInstance resource. This resource defines an endpoint instance that terminates traffic of certain protocols.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "instance": { - "type": "string", - "description": "A URL to the virtual machine instance that handles traffic for this target instance. When creating a target instance, you can provide the fully-qualified URL or a valid partial URL to the desired virtual machine. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance \n- projects/project/zones/zone/instances/instance \n- zones/zone/instances/instance" - }, - "kind": { - "type": "string", - "description": "[Output Only] The type of the resource. Always compute#targetInstance for target instances.", - "default": "compute#targetInstance" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "natPolicy": { - "type": "string", - "description": "NAT option controlling how IPs are NAT'ed to the instance. Currently only NO_NAT (default value) is supported.", - "enum": [ - "NO_NAT" - ], - "enumDescriptions": [ - "" - ] - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "zone": { - "type": "string", - "description": "[Output Only] URL of the zone where the target instance resides." - } - } - }, - "TargetInstanceAggregatedList": { - "id": "TargetInstanceAggregatedList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "object", - "description": "A map of scoped target instance lists.", - "additionalProperties": { - "$ref": "TargetInstancesScopedList", - "description": "Name of the scope containing this set of target instances." - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#targetInstanceAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "TargetInstanceList": { - "id": "TargetInstanceList", - "type": "object", - "description": "Contains a list of TargetInstance resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "A list of TargetInstance resources.", - "items": { - "$ref": "TargetInstance" - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#targetInstanceList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "TargetInstancesScopedList": { - "id": "TargetInstancesScopedList", - "type": "object", - "properties": { - "targetInstances": { - "type": "array", - "description": "List of target instances contained in this scope.", - "items": { - "$ref": "TargetInstance" - } - }, - "warning": { - "type": "object", - "description": "Informational warning which replaces the list of addresses when the list is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." - } - } - } - } - }, - "TargetPool": { - "id": "TargetPool", - "type": "object", - "description": "A TargetPool resource. This resource defines a pool of instances, an associated HttpHealthCheck resource, and the fallback target pool.", - "properties": { - "backupPool": { - "type": "string", - "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool, and its failoverRatio field is properly set to a value between [0, 1].\n\nbackupPool and failoverRatio together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below failoverRatio, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where failoverRatio and backupPool are not set, or all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances with the best effort, or to all instances when no instance is healthy." - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "failoverRatio": { - "type": "number", - "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool (i.e., not as a backup pool to some other target pool). The value of the field must be in [0, 1].\n\nIf set, backupPool must also be set. 
They together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below this number, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where failoverRatio is not set or all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances with the best effort, or to all instances when no instance is healthy.", - "format": "float" - }, - "healthChecks": { - "type": "array", - "description": "The URL of the HttpHealthCheck resource. A member instance in this pool is considered healthy if and only if the health checks pass. An empty list means all member instances will be considered healthy at all times. Only HttpHealthChecks are supported. Only one health check may be specified.", - "items": { - "type": "string" - } - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "instances": { - "type": "array", - "description": "A list of resource URLs to the virtual machine instances serving this pool. They must live in zones contained in the same region as this pool.", - "items": { - "type": "string" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#targetPool for target pools.", - "default": "compute#targetPool" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "region": { - "type": "string", - "description": "[Output Only] URL of the region where the target pool resides." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "sessionAffinity": { - "type": "string", - "description": "Sesssion affinity option, must be one of the following values:\nNONE: Connections from the same client IP may go to any instance in the pool.\nCLIENT_IP: Connections from the same client IP will go to the same instance in the pool while that instance remains healthy.\nCLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol will go to the same instance in the pool while that instance remains healthy.", - "enum": [ - "CLIENT_IP", - "CLIENT_IP_PORT_PROTO", - "CLIENT_IP_PROTO", - "GENERATED_COOKIE", - "NONE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ] - } - } - }, - "TargetPoolAggregatedList": { - "id": "TargetPoolAggregatedList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource. Defined by the server." - }, - "items": { - "type": "object", - "description": "[Output Only] A map of scoped target pool lists.", - "additionalProperties": { - "$ref": "TargetPoolsScopedList", - "description": "Name of the scope containing this set of target pools." - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. 
Always compute#targetPoolAggregatedList for aggregated lists of target pools.", - "default": "compute#targetPoolAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "TargetPoolInstanceHealth": { - "id": "TargetPoolInstanceHealth", - "type": "object", - "properties": { - "healthStatus": { - "type": "array", - "items": { - "$ref": "HealthStatus" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetPoolInstanceHealth when checking the health of an instance.", - "default": "compute#targetPoolInstanceHealth" - } - } - }, - "TargetPoolList": { - "id": "TargetPoolList", - "type": "object", - "description": "Contains a list of TargetPool resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource. Defined by the server." - }, - "items": { - "type": "array", - "description": "A list of TargetPool resources.", - "items": { - "$ref": "TargetPool" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetPoolList for lists of target pools.", - "default": "compute#targetPoolList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "TargetPoolsAddHealthCheckRequest": { - "id": "TargetPoolsAddHealthCheckRequest", - "type": "object", - "properties": { - "healthChecks": { - "type": "array", - "description": "The HttpHealthCheck to add to the target pool.", - "items": { - "$ref": "HealthCheckReference" - } - } - } - }, - "TargetPoolsAddInstanceRequest": { - "id": "TargetPoolsAddInstanceRequest", - "type": "object", - "properties": { - "instances": { - "type": "array", - "description": "A full or partial URL to an instance to add to this target pool. This can be a full or partial URL. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project-id/zones/zone/instances/instance-name \n- projects/project-id/zones/zone/instances/instance-name \n- zones/zone/instances/instance-name", - "items": { - "$ref": "InstanceReference" - } - } - } - }, - "TargetPoolsRemoveHealthCheckRequest": { - "id": "TargetPoolsRemoveHealthCheckRequest", - "type": "object", - "properties": { - "healthChecks": { - "type": "array", - "description": "Health check URL to be removed. This can be a full or valid partial URL. 
For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project/global/httpHealthChecks/health-check \n- projects/project/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", - "items": { - "$ref": "HealthCheckReference" - } - } - } - }, - "TargetPoolsRemoveInstanceRequest": { - "id": "TargetPoolsRemoveInstanceRequest", - "type": "object", - "properties": { - "instances": { - "type": "array", - "description": "URLs of the instances to be removed from target pool.", - "items": { - "$ref": "InstanceReference" - } - } - } - }, - "TargetPoolsScopedList": { - "id": "TargetPoolsScopedList", - "type": "object", - "properties": { - "targetPools": { - "type": "array", - "description": "List of target pools contained in this scope.", - "items": { - "$ref": "TargetPool" - } - }, - "warning": { - "type": "object", - "description": "Informational warning which replaces the list of addresses when the list is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." - } - } - } - } - }, - "TargetReference": { - "id": "TargetReference", - "type": "object", - "properties": { - "target": { - "type": "string" - } - } - }, - "TargetSslProxiesSetBackendServiceRequest": { - "id": "TargetSslProxiesSetBackendServiceRequest", - "type": "object", - "properties": { - "service": { - "type": "string", - "description": "The URL of the new BackendService resource for the targetSslProxy." 
- } - } - }, - "TargetSslProxiesSetProxyHeaderRequest": { - "id": "TargetSslProxiesSetProxyHeaderRequest", - "type": "object", - "properties": { - "proxyHeader": { - "type": "string", - "description": "The new type of proxy header to append before sending data to the backend. NONE or PROXY_V1 are allowed.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ] - } - } - }, - "TargetSslProxiesSetSslCertificatesRequest": { - "id": "TargetSslProxiesSetSslCertificatesRequest", - "type": "object", - "properties": { - "sslCertificates": { - "type": "array", - "description": "New set of URLs to SslCertificate resources to associate with this TargetSslProxy. Currently exactly one ssl certificate must be specified.", - "items": { - "type": "string" - } - } - } - }, - "TargetSslProxy": { - "id": "TargetSslProxy", - "type": "object", - "description": "A TargetSslProxy resource. This resource defines an SSL proxy.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#targetSslProxy for target SSL proxies.", - "default": "compute#targetSslProxy" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "proxyHeader": { - "type": "string", - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "service": { - "type": "string", - "description": "URL to the BackendService resource." - }, - "sslCertificates": { - "type": "array", - "description": "URLs to SslCertificate resources that are used to authenticate connections to Backends. Currently exactly one SSL certificate must be specified.", - "items": { - "type": "string" - } - } - } - }, - "TargetSslProxyList": { - "id": "TargetSslProxyList", - "type": "object", - "description": "Contains a list of TargetSslProxy resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." 
- }, - "items": { - "type": "array", - "description": "A list of TargetSslProxy resources.", - "items": { - "$ref": "TargetSslProxy" - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#targetSslProxyList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "TargetTcpProxiesSetBackendServiceRequest": { - "id": "TargetTcpProxiesSetBackendServiceRequest", - "type": "object", - "properties": { - "service": { - "type": "string", - "description": "The URL of the new BackendService resource for the targetTcpProxy." - } - } - }, - "TargetTcpProxiesSetProxyHeaderRequest": { - "id": "TargetTcpProxiesSetProxyHeaderRequest", - "type": "object", - "properties": { - "proxyHeader": { - "type": "string", - "description": "The new type of proxy header to append before sending data to the backend. NONE or PROXY_V1 are allowed.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ] - } - } - }, - "TargetTcpProxy": { - "id": "TargetTcpProxy", - "type": "object", - "description": "A TargetTcpProxy resource. This resource defines a TCP proxy.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#targetTcpProxy for target TCP proxies.", - "default": "compute#targetTcpProxy" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "proxyHeader": { - "type": "string", - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "service": { - "type": "string", - "description": "URL to the BackendService resource." - } - } - }, - "TargetTcpProxyList": { - "id": "TargetTcpProxyList", - "type": "object", - "description": "Contains a list of TargetTcpProxy resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "A list of TargetTcpProxy resources.", - "items": { - "$ref": "TargetTcpProxy" - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#targetTcpProxyList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "TargetVpnGateway": { - "id": "TargetVpnGateway", - "type": "object", - "description": "Represents a Target VPN gateway resource.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "forwardingRules": { - "type": "array", - "description": "[Output Only] A list of URLs to the ForwardingRule resources. ForwardingRules are created using compute.forwardingRules.insert and associated to a VPN gateway.", - "items": { - "type": "string" - } - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", - "default": "compute#targetVpnGateway" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.targetVpnGateways.insert" - ] - } - }, - "network": { - "type": "string", - "description": "URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created.", - "annotations": { - "required": [ - "compute.targetVpnGateways.insert" - ] - } - }, - "region": { - "type": "string", - "description": "[Output Only] URL of the region where the target VPN gateway resides." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "status": { - "type": "string", - "description": "[Output Only] The status of the VPN gateway.", - "enum": [ - "CREATING", - "DELETING", - "FAILED", - "READY" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ] - }, - "tunnels": { - "type": "array", - "description": "[Output Only] A list of URLs to VpnTunnel resources. 
VpnTunnels are created using compute.vpntunnels.insert method and associated to a VPN gateway.", - "items": { - "type": "string" - } - } - } - }, - "TargetVpnGatewayAggregatedList": { - "id": "TargetVpnGatewayAggregatedList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "object", - "description": "A map of scoped target vpn gateway lists.", - "additionalProperties": { - "$ref": "TargetVpnGatewaysScopedList", - "description": "[Output Only] Name of the scope containing this set of target VPN gateways." - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", - "default": "compute#targetVpnGatewayAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "TargetVpnGatewayList": { - "id": "TargetVpnGatewayList", - "type": "object", - "description": "Contains a list of TargetVpnGateway resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "[Output Only] A list of TargetVpnGateway resources.", - "items": { - "$ref": "TargetVpnGateway" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", - "default": "compute#targetVpnGatewayList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "TargetVpnGatewaysScopedList": { - "id": "TargetVpnGatewaysScopedList", - "type": "object", - "properties": { - "targetVpnGateways": { - "type": "array", - "description": "[Output Only] List of target vpn gateways contained in this scope.", - "items": { - "$ref": "TargetVpnGateway" - } - }, - "warning": { - "type": "object", - "description": "[Output Only] Informational warning which replaces the list of addresses when the list is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." - } - } - } - } - }, - "TestFailure": { - "id": "TestFailure", - "type": "object", - "properties": { - "actualService": { - "type": "string" - }, - "expectedService": { - "type": "string" - }, - "host": { - "type": "string" - }, - "path": { - "type": "string" - } - } - }, - "TestPermissionsRequest": { - "id": "TestPermissionsRequest", - "type": "object", - "properties": { - "permissions": { - "type": "array", - "description": "The set of permissions to check for the 'resource'. Permissions with wildcards (such as '*' or 'storage.*') are not allowed.", - "items": { - "type": "string" - } - } - } - }, - "TestPermissionsResponse": { - "id": "TestPermissionsResponse", - "type": "object", - "properties": { - "permissions": { - "type": "array", - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", - "items": { - "type": "string" - } - } - } - }, - "UDPHealthCheck": { - "id": "UDPHealthCheck", - "type": "object", - "properties": { - "port": { - "type": "integer", - "description": "The UDP port number for the health check request. Valid values are 1 through 65535.", - "format": "int32" - }, - "portName": { - "type": "string", - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." - }, - "request": { - "type": "string", - "description": "Raw data of request to send in payload of UDP packet. It is an error if this is empty. The request data can only be ASCII." - }, - "response": { - "type": "string", - "description": "The bytes to match against the beginning of the response data. It is an error if this is empty. The response data can only be ASCII." 
- } - } - }, - "UrlMap": { - "id": "UrlMap", - "type": "object", - "description": "A UrlMap resource. This resource defines the mapping from URL to the BackendService resource, based on the \"longest-match\" of the URL's host and path.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "defaultService": { - "type": "string", - "description": "The URL of the BackendService resource if none of the hostRules match." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "fingerprint": { - "type": "string", - "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a UrlMap. An up-to-date fingerprint must be provided in order to update the UrlMap.", - "format": "byte" - }, - "hostRules": { - "type": "array", - "description": "The list of HostRules to use against the URL.", - "items": { - "$ref": "HostRule" - } - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#urlMaps for url maps.", - "default": "compute#urlMap" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "pathMatchers": { - "type": "array", - "description": "The list of named PathMatchers to use against the URL.", - "items": { - "$ref": "PathMatcher" - } - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "tests": { - "type": "array", - "description": "The list of expected URL mappings. Request to update this UrlMap will succeed only if all of the test cases pass.", - "items": { - "$ref": "UrlMapTest" - } - } - } - }, - "UrlMapList": { - "id": "UrlMapList", - "type": "object", - "description": "Contains a list of UrlMap resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource. Set by the server." - }, - "items": { - "type": "array", - "description": "A list of UrlMap resources.", - "items": { - "$ref": "UrlMap" - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#urlMapList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." 
- } - } - }, - "UrlMapReference": { - "id": "UrlMapReference", - "type": "object", - "properties": { - "urlMap": { - "type": "string" - } - } - }, - "UrlMapTest": { - "id": "UrlMapTest", - "type": "object", - "description": "Message for the expected URL mappings.", - "properties": { - "description": { - "type": "string", - "description": "Description of this test case." - }, - "host": { - "type": "string", - "description": "Host portion of the URL." - }, - "path": { - "type": "string", - "description": "Path portion of the URL." - }, - "service": { - "type": "string", - "description": "Expected BackendService resource the given URL should be mapped to." - } - } - }, - "UrlMapValidationResult": { - "id": "UrlMapValidationResult", - "type": "object", - "description": "Message representing the validation result for a UrlMap.", - "properties": { - "loadErrors": { - "type": "array", - "items": { - "type": "string" - } - }, - "loadSucceeded": { - "type": "boolean", - "description": "Whether the given UrlMap can be successfully loaded. If false, 'loadErrors' indicates the reasons." - }, - "testFailures": { - "type": "array", - "items": { - "$ref": "TestFailure" - } - }, - "testPassed": { - "type": "boolean", - "description": "If successfully loaded, this field indicates whether the test passed. If false, 'testFailures's indicate the reason of failure." - } - } - }, - "UrlMapsValidateRequest": { - "id": "UrlMapsValidateRequest", - "type": "object", - "properties": { - "resource": { - "$ref": "UrlMap", - "description": "Content of the UrlMap to be validated." - } - } - }, - "UrlMapsValidateResponse": { - "id": "UrlMapsValidateResponse", - "type": "object", - "properties": { - "result": { - "$ref": "UrlMapValidationResult" - } - } - }, - "UsageExportLocation": { - "id": "UsageExportLocation", - "type": "object", - "description": "The location in Cloud Storage and naming method of the daily usage report. Contains bucket_name and report_name prefix.", - "properties": { - "bucketName": { - "type": "string", - "description": "The name of an existing bucket in Cloud Storage where the usage report object is stored. The Google Service Account is granted write access to this bucket. This can either be the bucket name by itself, such as example-bucket, or the bucket name with gs:// or https://storage.googleapis.com/ in front of it, such as gs://example-bucket." - }, - "reportNamePrefix": { - "type": "string", - "description": "An optional prefix for the name of the usage report object stored in bucketName. If not supplied, defaults to usage. The report is stored as a CSV file named report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the usage according to Pacific Time. If you supply a prefix, it should conform to Cloud Storage object naming conventions." - } - } - }, - "VpnTunnel": { - "id": "VpnTunnel", - "type": "object", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "detailedStatus": { - "type": "string", - "description": "[Output Only] Detailed status message for the VPN tunnel." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", - "format": "uint64" - }, - "ikeVersion": { - "type": "integer", - "description": "IKE protocol version to use when establishing the VPN tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. Default version is 2.", - "format": "int32" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#vpnTunnel for VPN tunnels.", - "default": "compute#vpnTunnel" - }, - "localTrafficSelector": { - "type": "array", - "description": "Local traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. Only IPv4 is supported.", - "items": { - "type": "string" - } - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.vpnTunnels.insert" - ] - } - }, - "peerIp": { - "type": "string", - "description": "IP address of the peer VPN gateway. Only IPv4 is supported." - }, - "region": { - "type": "string", - "description": "[Output Only] URL of the region where the VPN tunnel resides." - }, - "remoteTrafficSelector": { - "type": "array", - "description": "Remote traffic selectors to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. Only IPv4 is supported.", - "items": { - "type": "string" - } - }, - "router": { - "type": "string", - "description": "URL of router resource to be used for dynamic routing." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "sharedSecret": { - "type": "string", - "description": "Shared secret used to set the secure session between the Cloud VPN gateway and the peer VPN gateway." - }, - "sharedSecretHash": { - "type": "string", - "description": "Hash of the shared secret." - }, - "status": { - "type": "string", - "description": "[Output Only] The status of the VPN tunnel.", - "enum": [ - "ALLOCATING_RESOURCES", - "AUTHORIZATION_ERROR", - "DEPROVISIONING", - "ESTABLISHED", - "FAILED", - "FIRST_HANDSHAKE", - "NEGOTIATION_FAILURE", - "NETWORK_ERROR", - "NO_INCOMING_PACKETS", - "PROVISIONING", - "REJECTED", - "WAITING_FOR_FULL_CONFIG" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "targetVpnGateway": { - "type": "string", - "description": "URL of the VPN gateway with which this VPN tunnel is associated. Provided by the client when the VPN tunnel is created.", - "annotations": { - "required": [ - "compute.vpnTunnels.insert" - ] - } - } - } - }, - "VpnTunnelAggregatedList": { - "id": "VpnTunnelAggregatedList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." 
- }, - "items": { - "type": "object", - "description": "[Output Only] A map of scoped vpn tunnel lists.", - "additionalProperties": { - "$ref": "VpnTunnelsScopedList", - "description": "Name of the scope containing this set of vpn tunnels." - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#vpnTunnel for VPN tunnels.", - "default": "compute#vpnTunnelAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "VpnTunnelList": { - "id": "VpnTunnelList", - "type": "object", - "description": "Contains a list of VpnTunnel resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "[Output Only] A list of VpnTunnel resources.", - "items": { - "$ref": "VpnTunnel" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#vpnTunnel for VPN tunnels.", - "default": "compute#vpnTunnelList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "VpnTunnelsScopedList": { - "id": "VpnTunnelsScopedList", - "type": "object", - "properties": { - "vpnTunnels": { - "type": "array", - "description": "List of vpn tunnels contained in this scope.", - "items": { - "$ref": "VpnTunnel" - } - }, - "warning": { - "type": "object", - "description": "Informational warning which replaces the list of addresses when the list is empty.", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." - } - } - } - } - }, - "XpnHostList": { - "id": "XpnHostList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "[Output Only] A list of XPN host project URLs.", - "items": { - "$ref": "Project" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#xpnHostList for lists of XPN hosts.", - "default": "compute#xpnHostList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "XpnResourceId": { - "id": "XpnResourceId", - "type": "object", - "description": "XpnResourceId", - "properties": { - "id": { - "type": "string", - "description": "The ID of the XPN resource. In the case of projects, this field matches the project's name, not the canonical ID." - }, - "type": { - "type": "string", - "description": "The type of the XPN resource.", - "enum": [ - "PROJECT", - "XPN_RESOURCE_TYPE_UNSPECIFIED" - ], - "enumDescriptions": [ - "", - "" - ] - } - } - }, - "Zone": { - "id": "Zone", - "type": "object", - "description": "A Zone resource.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "deprecated": { - "$ref": "DeprecationStatus", - "description": "[Output Only] The deprecation status associated with this zone." - }, - "description": { - "type": "string", - "description": "[Output Only] Textual description of the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#zone for zones.", - "default": "compute#zone" - }, - "name": { - "type": "string", - "description": "[Output Only] Name of the resource." - }, - "region": { - "type": "string", - "description": "[Output Only] Full URL reference to the region which hosts the zone." 
- }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "status": { - "type": "string", - "description": "[Output Only] Status of the zone, either UP or DOWN.", - "enum": [ - "DOWN", - "UP" - ], - "enumDescriptions": [ - "", - "" - ] - } - } - }, - "ZoneList": { - "id": "ZoneList", - "type": "object", - "description": "Contains a list of zone resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "array", - "description": "[Output Only] A list of Zone resources.", - "items": { - "$ref": "Zone" - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#zoneList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "ZoneSetLabelsRequest": { - "id": "ZoneSetLabelsRequest", - "type": "object", - "properties": { - "labelFingerprint": { - "type": "string", - "description": "The fingerprint of the previous set of labels for this resource, used to detect conflicts. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. Make a get() request to the resource to get the latest fingerprint.", - "format": "byte" - }, - "labels": { - "type": "object", - "description": "The labels to set for this resource.", - "additionalProperties": { - "type": "string" - } - } - } - } - }, - "resources": { - "acceleratorTypes": { - "methods": { - "aggregatedList": { - "id": "compute.acceleratorTypes.aggregatedList", - "path": "{project}/aggregated/acceleratorTypes", - "httpMethod": "GET", - "description": "Retrieves an aggregated list of accelerator types.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "AcceleratorTypeAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "get": { - "id": "compute.acceleratorTypes.get", - "path": "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}", - "httpMethod": "GET", - "description": "Returns the specified accelerator type. 
Get a list of available accelerator types by making a list() request.", - "parameters": { - "acceleratorType": { - "type": "string", - "description": "Name of the accelerator type to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "acceleratorType" - ], - "response": { - "$ref": "AcceleratorType" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "list": { - "id": "compute.acceleratorTypes.list", - "path": "{project}/zones/{zone}/acceleratorTypes", - "httpMethod": "GET", - "description": "Retrieves a list of accelerator types available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone" - ], - "response": { - "$ref": "AcceleratorTypeList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "addresses": { - "methods": { - "aggregatedList": { - "id": "compute.addresses.aggregatedList", - "path": "{project}/aggregated/addresses", - "httpMethod": "GET", - "description": "Retrieves an aggregated list of addresses.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "AddressAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "delete": { - "id": "compute.addresses.delete", - "path": "{project}/regions/{region}/addresses/{address}", - "httpMethod": "DELETE", - "description": "Deletes the specified address resource.", - "parameters": { - "address": { - "type": "string", - "description": "Name of the address resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "address" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.addresses.get", - "path": "{project}/regions/{region}/addresses/{address}", - "httpMethod": "GET", - "description": "Returns the specified address resource.", - "parameters": { - "address": { - "type": "string", - "description": "Name of the address resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "address" - ], - "response": { - "$ref": "Address" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.addresses.insert", - 
"path": "{project}/regions/{region}/addresses", - "httpMethod": "POST", - "description": "Creates an address resource in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "request": { - "$ref": "Address" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.addresses.list", - "path": "{project}/regions/{region}/addresses", - "httpMethod": "GET", - "description": "Retrieves a list of addresses contained within the specified region.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "response": { - "$ref": "AddressList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "id": "compute.addresses.testIamPermissions", - "path": "{project}/regions/{region}/addresses/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "The name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "autoscalers": { - "methods": { - "aggregatedList": { - "id": "compute.autoscalers.aggregatedList", - "path": "{project}/aggregated/autoscalers", - "httpMethod": "GET", - "description": "Retrieves an aggregated list of autoscalers.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "AutoscalerAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "delete": { - "id": "compute.autoscalers.delete", - "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", - "httpMethod": "DELETE", - "description": "Deletes the specified autoscaler.", - "parameters": { - "autoscaler": { - "type": "string", - "description": "Name of the autoscaler to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "Name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "autoscaler" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.autoscalers.get", - "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", - "httpMethod": "GET", - "description": "Returns the specified autoscaler resource. 
Get a list of available autoscalers by making a list() request.", - "parameters": { - "autoscaler": { - "type": "string", - "description": "Name of the autoscaler to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "Name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "autoscaler" - ], - "response": { - "$ref": "Autoscaler" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.autoscalers.insert", - "path": "{project}/zones/{zone}/autoscalers", - "httpMethod": "POST", - "description": "Creates an autoscaler in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "Name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone" - ], - "request": { - "$ref": "Autoscaler" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.autoscalers.list", - "path": "{project}/zones/{zone}/autoscalers", - "httpMethod": "GET", - "description": "Retrieves a list of autoscalers contained within the specified zone.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "Name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone" - ], - "response": { - "$ref": "AutoscalerList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "id": "compute.autoscalers.patch", - "path": "{project}/zones/{zone}/autoscalers", - "httpMethod": "PATCH", - "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports patch semantics.", - "parameters": { - "autoscaler": { - "type": "string", - "description": "Name of the autoscaler to patch.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "Name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone" - ], - "request": { - "$ref": "Autoscaler" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "id": "compute.autoscalers.testIamPermissions", - "path": "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "update": { - "id": "compute.autoscalers.update", - "path": "{project}/zones/{zone}/autoscalers", - "httpMethod": "PUT", - "description": "Updates an autoscaler in the specified project using the data included in the request.", - "parameters": { - "autoscaler": { - "type": "string", - "description": "Name of the autoscaler to update.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "Name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone" - ], - "request": { - "$ref": "Autoscaler" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "backendBuckets": { - "methods": { - "delete": { - "id": "compute.backendBuckets.delete", - "path": 
"{project}/global/backendBuckets/{backendBucket}", - "httpMethod": "DELETE", - "description": "Deletes the specified BackendBucket resource.", - "parameters": { - "backendBucket": { - "type": "string", - "description": "Name of the BackendBucket resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "backendBucket" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.backendBuckets.get", - "path": "{project}/global/backendBuckets/{backendBucket}", - "httpMethod": "GET", - "description": "Returns the specified BackendBucket resource. Get a list of available backend buckets by making a list() request.", - "parameters": { - "backendBucket": { - "type": "string", - "description": "Name of the BackendBucket resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "backendBucket" - ], - "response": { - "$ref": "BackendBucket" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.backendBuckets.insert", - "path": "{project}/global/backendBuckets", - "httpMethod": "POST", - "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "BackendBucket" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.backendBuckets.list", - "path": "{project}/global/backendBuckets", - "httpMethod": "GET", - "description": "Retrieves the list of BackendBucket resources available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "BackendBucketList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "id": "compute.backendBuckets.patch", - "path": "{project}/global/backendBuckets/{backendBucket}", - "httpMethod": "PATCH", - "description": "Updates the specified BackendBucket resource with the data included in the request. 
This method supports patch semantics.", - "parameters": { - "backendBucket": { - "type": "string", - "description": "Name of the BackendBucket resource to patch.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "backendBucket" - ], - "request": { - "$ref": "BackendBucket" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "update": { - "id": "compute.backendBuckets.update", - "path": "{project}/global/backendBuckets/{backendBucket}", - "httpMethod": "PUT", - "description": "Updates the specified BackendBucket resource with the data included in the request.", - "parameters": { - "backendBucket": { - "type": "string", - "description": "Name of the BackendBucket resource to update.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "backendBucket" - ], - "request": { - "$ref": "BackendBucket" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "backendServices": { - "methods": { - "aggregatedList": { - "id": "compute.backendServices.aggregatedList", - "path": "{project}/aggregated/backendServices", - "httpMethod": "GET", - "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Name of the project scoping this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "BackendServiceAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "delete": { - "id": "compute.backendServices.delete", - "path": "{project}/global/backendServices/{backendService}", - "httpMethod": "DELETE", - "description": "Deletes the specified BackendService resource.", - "parameters": { - "backendService": { - "type": "string", - "description": "Name of the BackendService resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "backendService" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.backendServices.get", - "path": "{project}/global/backendServices/{backendService}", - "httpMethod": "GET", - "description": "Returns the specified BackendService resource. 
Get a list of available backend services by making a list() request.", - "parameters": { - "backendService": { - "type": "string", - "description": "Name of the BackendService resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "backendService" - ], - "response": { - "$ref": "BackendService" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "getHealth": { - "id": "compute.backendServices.getHealth", - "path": "{project}/global/backendServices/{backendService}/getHealth", - "httpMethod": "POST", - "description": "Gets the most recent health check results for this BackendService.", - "parameters": { - "backendService": { - "type": "string", - "description": "Name of the BackendService resource to which the queried instance belongs.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "backendService" - ], - "request": { - "$ref": "ResourceGroupReference" - }, - "response": { - "$ref": "BackendServiceGroupHealth" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.backendServices.insert", - "path": "{project}/global/backendServices", - "httpMethod": "POST", - "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "BackendService" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.backendServices.list", - "path": "{project}/global/backendServices", - "httpMethod": "GET", - "description": "Retrieves the list of BackendService resources available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "BackendServiceList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "id": "compute.backendServices.patch", - "path": "{project}/global/backendServices/{backendService}", - "httpMethod": "PATCH", - "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. 
This method supports patch semantics.", - "parameters": { - "backendService": { - "type": "string", - "description": "Name of the BackendService resource to patch.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "backendService" - ], - "request": { - "$ref": "BackendService" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "id": "compute.backendServices.testIamPermissions", - "path": "{project}/global/backendServices/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "update": { - "id": "compute.backendServices.update", - "path": "{project}/global/backendServices/{backendService}", - "httpMethod": "PUT", - "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", - "parameters": { - "backendService": { - "type": "string", - "description": "Name of the BackendService resource to update.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "backendService" - ], - "request": { - "$ref": "BackendService" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "diskTypes": { - "methods": { - "aggregatedList": { - "id": "compute.diskTypes.aggregatedList", - "path": "{project}/aggregated/diskTypes", - "httpMethod": "GET", - "description": "Retrieves an aggregated list of disk types.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "DiskTypeAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "get": { - "id": "compute.diskTypes.get", - "path": "{project}/zones/{zone}/diskTypes/{diskType}", - "httpMethod": "GET", - "description": "Returns the specified disk type. 
Get a list of available disk types by making a list() request.", - "parameters": { - "diskType": { - "type": "string", - "description": "Name of the disk type to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "diskType" - ], - "response": { - "$ref": "DiskType" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "list": { - "id": "compute.diskTypes.list", - "path": "{project}/zones/{zone}/diskTypes", - "httpMethod": "GET", - "description": "Retrieves a list of disk types available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone" - ], - "response": { - "$ref": "DiskTypeList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "disks": { - "methods": { - "aggregatedList": { - "id": "compute.disks.aggregatedList", - "path": "{project}/aggregated/disks", - "httpMethod": "GET", - "description": "Retrieves an aggregated list of persistent disks.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "DiskAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "createSnapshot": { - "id": "compute.disks.createSnapshot", - "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", - "httpMethod": "POST", - "description": "Creates a snapshot of a specified persistent disk.", - "parameters": { - "disk": { - "type": "string", - "description": "Name of the persistent disk to snapshot.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "guestFlush": { - "type": "boolean", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "disk" - ], - "request": { - "$ref": "Snapshot" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "delete": { - "id": "compute.disks.delete", - "path": "{project}/zones/{zone}/disks/{disk}", - "httpMethod": "DELETE", - "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. 
You must separately delete snapshots.", - "parameters": { - "disk": { - "type": "string", - "description": "Name of the persistent disk to delete.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "disk" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.disks.get", - "path": "{project}/zones/{zone}/disks/{disk}", - "httpMethod": "GET", - "description": "Returns a specified persistent disk. Get a list of available persistent disks by making a list() request.", - "parameters": { - "disk": { - "type": "string", - "description": "Name of the persistent disk to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "disk" - ], - "response": { - "$ref": "Disk" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.disks.insert", - "path": "{project}/zones/{zone}/disks", - "httpMethod": "POST", - "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "sourceImage": { - "type": "string", - "description": "Optional. 
Source image to restore onto a disk.", - "location": "query" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone" - ], - "request": { - "$ref": "Disk" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.disks.list", - "path": "{project}/zones/{zone}/disks", - "httpMethod": "GET", - "description": "Retrieves a list of persistent disks contained within the specified zone.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone" - ], - "response": { - "$ref": "DiskList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "resize": { - "id": "compute.disks.resize", - "path": "{project}/zones/{zone}/disks/{disk}/resize", - "httpMethod": "POST", - "description": "Resizes the specified persistent disk.", - "parameters": { - "disk": { - "type": "string", - "description": "The name of the persistent disk.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "disk" - ], - "request": { - "$ref": "DisksResizeRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setLabels": { - "id": "compute.disks.setLabels", - "path": "{project}/zones/{zone}/disks/{resource}/setLabels", - "httpMethod": "POST", - "description": "Sets the labels on a disk. 
To learn more about labels, read the Labeling or Tagging Resources documentation.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "resource" - ], - "request": { - "$ref": "ZoneSetLabelsRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "id": "compute.disks.testIamPermissions", - "path": "{project}/zones/{zone}/disks/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "firewalls": { - "methods": { - "delete": { - "id": "compute.firewalls.delete", - "path": "{project}/global/firewalls/{firewall}", - "httpMethod": "DELETE", - "description": "Deletes the specified firewall.", - "parameters": { - "firewall": { - "type": "string", - "description": "Name of the firewall rule to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "firewall" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.firewalls.get", - "path": "{project}/global/firewalls/{firewall}", - "httpMethod": "GET", - "description": "Returns the specified firewall.", - "parameters": { - "firewall": { - "type": "string", - "description": "Name of the firewall rule to return.", - "required": true, - "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "firewall" - ], - "response": { - "$ref": "Firewall" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.firewalls.insert", - "path": "{project}/global/firewalls", - "httpMethod": "POST", - "description": "Creates a firewall rule in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "Firewall" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.firewalls.list", - "path": "{project}/global/firewalls", - "httpMethod": "GET", - "description": "Retrieves the list of firewall rules available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "FirewallList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "id": "compute.firewalls.patch", - "path": "{project}/global/firewalls/{firewall}", - "httpMethod": "PATCH", - "description": "Updates the specified firewall rule with the data included in the request. This method supports patch semantics.", - "parameters": { - "firewall": { - "type": "string", - "description": "Name of the firewall rule to patch.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "firewall" - ], - "request": { - "$ref": "Firewall" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "id": "compute.firewalls.testIamPermissions", - "path": "{project}/global/firewalls/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "update": { - "id": "compute.firewalls.update", - "path": "{project}/global/firewalls/{firewall}", - "httpMethod": "PUT", - "description": "Updates the specified 
firewall rule with the data included in the request. Using PUT method, can only update following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags.", - "parameters": { - "firewall": { - "type": "string", - "description": "Name of the firewall rule to update.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "firewall" - ], - "request": { - "$ref": "Firewall" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "forwardingRules": { - "methods": { - "aggregatedList": { - "id": "compute.forwardingRules.aggregatedList", - "path": "{project}/aggregated/forwardingRules", - "httpMethod": "GET", - "description": "Retrieves an aggregated list of forwarding rules.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "ForwardingRuleAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "delete": { - "id": "compute.forwardingRules.delete", - "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", - "httpMethod": "DELETE", - "description": "Deletes the specified ForwardingRule resource.", - "parameters": { - "forwardingRule": { - "type": "string", - "description": "Name of the ForwardingRule resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "forwardingRule" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.forwardingRules.get", - "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", - "httpMethod": "GET", - "description": "Returns the specified ForwardingRule resource.", - "parameters": { - "forwardingRule": { - "type": "string", - "description": "Name of the ForwardingRule resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "forwardingRule" - ], - "response": { - "$ref": "ForwardingRule" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.forwardingRules.insert", - "path": "{project}/regions/{region}/forwardingRules", - "httpMethod": "POST", - "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the 
request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "request": { - "$ref": "ForwardingRule" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.forwardingRules.list", - "path": "{project}/regions/{region}/forwardingRules", - "httpMethod": "GET", - "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "response": { - "$ref": "ForwardingRuleList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "setTarget": { - "id": "compute.forwardingRules.setTarget", - "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", - "httpMethod": "POST", - "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", - "parameters": { - "forwardingRule": { - "type": "string", - "description": "Name of the ForwardingRule resource in which target is to be set.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "forwardingRule" - ], - "request": { - "$ref": "TargetReference" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "id": "compute.forwardingRules.testIamPermissions", - "path": "{project}/regions/{region}/forwardingRules/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "The name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - 
}, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "globalAddresses": { - "methods": { - "delete": { - "id": "compute.globalAddresses.delete", - "path": "{project}/global/addresses/{address}", - "httpMethod": "DELETE", - "description": "Deletes the specified address resource.", - "parameters": { - "address": { - "type": "string", - "description": "Name of the address resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "address" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.globalAddresses.get", - "path": "{project}/global/addresses/{address}", - "httpMethod": "GET", - "description": "Returns the specified address resource. Get a list of available addresses by making a list() request.", - "parameters": { - "address": { - "type": "string", - "description": "Name of the address resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "address" - ], - "response": { - "$ref": "Address" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.globalAddresses.insert", - "path": "{project}/global/addresses", - "httpMethod": "POST", - "description": "Creates an address resource in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "Address" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.globalAddresses.list", - "path": "{project}/global/addresses", - "httpMethod": "GET", - "description": "Retrieves a list of global addresses.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. 
The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "AddressList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "id": "compute.globalAddresses.testIamPermissions", - "path": "{project}/global/addresses/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "globalForwardingRules": { - "methods": { - "delete": { - "id": "compute.globalForwardingRules.delete", - "path": "{project}/global/forwardingRules/{forwardingRule}", - "httpMethod": "DELETE", - "description": "Deletes the specified GlobalForwardingRule resource.", - "parameters": { - "forwardingRule": { - "type": "string", - "description": "Name of the ForwardingRule resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "forwardingRule" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.globalForwardingRules.get", - "path": "{project}/global/forwardingRules/{forwardingRule}", - "httpMethod": "GET", - "description": "Returns the specified GlobalForwardingRule resource. 
Get a list of available forwarding rules by making a list() request.", - "parameters": { - "forwardingRule": { - "type": "string", - "description": "Name of the ForwardingRule resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "forwardingRule" - ], - "response": { - "$ref": "ForwardingRule" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.globalForwardingRules.insert", - "path": "{project}/global/forwardingRules", - "httpMethod": "POST", - "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "ForwardingRule" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.globalForwardingRules.list", - "path": "{project}/global/forwardingRules", - "httpMethod": "GET", - "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "ForwardingRuleList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "setTarget": { - "id": "compute.globalForwardingRules.setTarget", - "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget", - "httpMethod": "POST", - "description": "Changes target URL for the GlobalForwardingRule resource. 
The new target should be of the same type as the old target.", - "parameters": { - "forwardingRule": { - "type": "string", - "description": "Name of the ForwardingRule resource in which target is to be set.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "forwardingRule" - ], - "request": { - "$ref": "TargetReference" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "id": "compute.globalForwardingRules.testIamPermissions", - "path": "{project}/global/forwardingRules/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "globalOperations": { - "methods": { - "aggregatedList": { - "id": "compute.globalOperations.aggregatedList", - "path": "{project}/aggregated/operations", - "httpMethod": "GET", - "description": "Retrieves an aggregated list of all operations.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "OperationAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "delete": { - "id": "compute.globalOperations.delete", - "path": "{project}/global/operations/{operation}", - "httpMethod": "DELETE", - "description": "Deletes the specified Operations resource.", - "parameters": { - "operation": { - "type": "string", - "description": "Name of the Operations resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "operation" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.globalOperations.get", - "path": "{project}/global/operations/{operation}", - "httpMethod": "GET", - "description": "Retrieves the specified Operations resource. 
Get a list of operations by making a list() request.", - "parameters": { - "operation": { - "type": "string", - "description": "Name of the Operations resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "operation" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "list": { - "id": "compute.globalOperations.list", - "path": "{project}/global/operations", - "httpMethod": "GET", - "description": "Retrieves a list of Operation resources contained within the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "OperationList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "healthChecks": { - "methods": { - "delete": { - "id": "compute.healthChecks.delete", - "path": "{project}/global/healthChecks/{healthCheck}", - "httpMethod": "DELETE", - "description": "Deletes the specified HealthCheck resource.", - "parameters": { - "healthCheck": { - "type": "string", - "description": "Name of the HealthCheck resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "healthCheck" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.healthChecks.get", - "path": "{project}/global/healthChecks/{healthCheck}", - "httpMethod": "GET", - "description": "Returns the specified HealthCheck resource. 
Get a list of available health checks by making a list() request.", - "parameters": { - "healthCheck": { - "type": "string", - "description": "Name of the HealthCheck resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "healthCheck" - ], - "response": { - "$ref": "HealthCheck" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.healthChecks.insert", - "path": "{project}/global/healthChecks", - "httpMethod": "POST", - "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "HealthCheck" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.healthChecks.list", - "path": "{project}/global/healthChecks", - "httpMethod": "GET", - "description": "Retrieves the list of HealthCheck resources available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "HealthCheckList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "id": "compute.healthChecks.patch", - "path": "{project}/global/healthChecks/{healthCheck}", - "httpMethod": "PATCH", - "description": "Updates a HealthCheck resource in the specified project using the data included in the request. 
This method supports patch semantics.", - "parameters": { - "healthCheck": { - "type": "string", - "description": "Name of the HealthCheck resource to patch.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "healthCheck" - ], - "request": { - "$ref": "HealthCheck" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "id": "compute.healthChecks.testIamPermissions", - "path": "{project}/global/healthChecks/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "update": { - "id": "compute.healthChecks.update", - "path": "{project}/global/healthChecks/{healthCheck}", - "httpMethod": "PUT", - "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", - "parameters": { - "healthCheck": { - "type": "string", - "description": "Name of the HealthCheck resource to update.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "healthCheck" - ], - "request": { - "$ref": "HealthCheck" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "httpHealthChecks": { - "methods": { - "delete": { - "id": "compute.httpHealthChecks.delete", - "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", - "httpMethod": "DELETE", - "description": "Deletes the specified HttpHealthCheck resource.", - "parameters": { - "httpHealthCheck": { - "type": "string", - "description": "Name of the HttpHealthCheck resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "httpHealthCheck" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.httpHealthChecks.get", - "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", - "httpMethod": "GET", - "description": "Returns the specified HttpHealthCheck resource. Get a list of available HTTP health checks by making a list() request.", - "parameters": { - "httpHealthCheck": { - "type": "string", - "description": "Name of the HttpHealthCheck resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "httpHealthCheck" - ], - "response": { - "$ref": "HttpHealthCheck" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.httpHealthChecks.insert", - "path": "{project}/global/httpHealthChecks", - "httpMethod": "POST", - "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "HttpHealthCheck" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.httpHealthChecks.list", - "path": "{project}/global/httpHealthChecks", - "httpMethod": "GET", - "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "HttpHealthCheckList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "id": "compute.httpHealthChecks.patch", - "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", - "httpMethod": "PATCH", - "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. 
This method supports patch semantics.", - "parameters": { - "httpHealthCheck": { - "type": "string", - "description": "Name of the HttpHealthCheck resource to patch.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "httpHealthCheck" - ], - "request": { - "$ref": "HttpHealthCheck" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "id": "compute.httpHealthChecks.testIamPermissions", - "path": "{project}/global/httpHealthChecks/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "update": { - "id": "compute.httpHealthChecks.update", - "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", - "httpMethod": "PUT", - "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", - "parameters": { - "httpHealthCheck": { - "type": "string", - "description": "Name of the HttpHealthCheck resource to update.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "httpHealthCheck" - ], - "request": { - "$ref": "HttpHealthCheck" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "httpsHealthChecks": { - "methods": { - "delete": { - "id": "compute.httpsHealthChecks.delete", - "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", - "httpMethod": "DELETE", - "description": "Deletes the specified HttpsHealthCheck resource.", - "parameters": { - "httpsHealthCheck": { - "type": "string", - "description": "Name of the HttpsHealthCheck resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this 
request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "httpsHealthCheck" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.httpsHealthChecks.get", - "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", - "httpMethod": "GET", - "description": "Returns the specified HttpsHealthCheck resource. Get a list of available HTTPS health checks by making a list() request.", - "parameters": { - "httpsHealthCheck": { - "type": "string", - "description": "Name of the HttpsHealthCheck resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "httpsHealthCheck" - ], - "response": { - "$ref": "HttpsHealthCheck" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.httpsHealthChecks.insert", - "path": "{project}/global/httpsHealthChecks", - "httpMethod": "POST", - "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "HttpsHealthCheck" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.httpsHealthChecks.list", - "path": "{project}/global/httpsHealthChecks", - "httpMethod": "GET", - "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "HttpsHealthCheckList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "id": "compute.httpsHealthChecks.patch", - "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", - "httpMethod": "PATCH", - "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. 
This method supports patch semantics.", - "parameters": { - "httpsHealthCheck": { - "type": "string", - "description": "Name of the HttpsHealthCheck resource to patch.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "httpsHealthCheck" - ], - "request": { - "$ref": "HttpsHealthCheck" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "id": "compute.httpsHealthChecks.testIamPermissions", - "path": "{project}/global/httpsHealthChecks/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "update": { - "id": "compute.httpsHealthChecks.update", - "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", - "httpMethod": "PUT", - "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", - "parameters": { - "httpsHealthCheck": { - "type": "string", - "description": "Name of the HttpsHealthCheck resource to update.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "httpsHealthCheck" - ], - "request": { - "$ref": "HttpsHealthCheck" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "images": { - "methods": { - "delete": { - "id": "compute.images.delete", - "path": "{project}/global/images/{image}", - "httpMethod": "DELETE", - "description": "Deletes the specified image.", - "parameters": { - "image": { - "type": "string", - "description": "Name of the image resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "image" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "deprecate": { - "id": "compute.images.deprecate", - "path": "{project}/global/images/{image}/deprecate", - "httpMethod": "POST", - "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", - "parameters": { - "image": { - "type": "string", - "description": "Image name.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "image" - ], - "request": { - "$ref": "DeprecationStatus" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.images.get", - "path": "{project}/global/images/{image}", - "httpMethod": "GET", - "description": "Returns the specified image. Get a list of available images by making a list() request.", - "parameters": { - "image": { - "type": "string", - "description": "Name of the image resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "image" - ], - "response": { - "$ref": "Image" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "getFromFamily": { - "id": "compute.images.getFromFamily", - "path": "{project}/global/images/family/{family}", - "httpMethod": "GET", - "description": "Returns the latest image that is part of an image family and is not deprecated.", - "parameters": { - "family": { - "type": "string", - "description": "Name of the image family to search for.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "family" - ], - "response": { - "$ref": "Image" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.images.insert", - "path": "{project}/global/images", - "httpMethod": "POST", - "description": "Creates an image in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - 
"description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "Image" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "list": { - "id": "compute.images.list", - "path": "{project}/global/images", - "httpMethod": "GET", - "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "ImageList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "setLabels": { - "id": "compute.images.setLabels", - "path": "{project}/global/images/{resource}/setLabels", - "httpMethod": "POST", - "description": "Sets the labels on an image. To learn more about labels, read the Labeling or Tagging Resources documentation.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "GlobalSetLabelsRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "id": "compute.images.testIamPermissions", - "path": "{project}/global/images/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "instanceGroupManagers": { - "methods": { - "abandonInstances": { - "id": "compute.instanceGroupManagers.abandonInstances", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", - "httpMethod": "POST", - "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. 
This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nYou can specify a maximum of 1000 instances with this method per request.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the managed instance group.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the managed instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroupManager" - ], - "request": { - "$ref": "InstanceGroupManagersAbandonInstancesRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "aggregatedList": { - "id": "compute.instanceGroupManagers.aggregatedList", - "path": "{project}/aggregated/instanceGroupManagers", - "httpMethod": "GET", - "description": "Retrieves the list of managed instance groups and groups them by zone.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "InstanceGroupManagerAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "delete": { - "id": "compute.instanceGroupManagers.delete", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", - "httpMethod": "DELETE", - "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the managed instance group to delete.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the managed instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroupManager" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "deleteInstances": { - "id": "compute.instanceGroupManagers.deleteInstances", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", - "httpMethod": "POST", - "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. 
You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nYou can specify a maximum of 1000 instances with this method per request.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the managed instance group.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the managed instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroupManager" - ], - "request": { - "$ref": "InstanceGroupManagersDeleteInstancesRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.instanceGroupManagers.get", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", - "httpMethod": "GET", - "description": "Returns all of the details about the specified managed instance group. Get a list of available managed instance groups by making a list() request.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the managed instance group.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the managed instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroupManager" - ], - "response": { - "$ref": "InstanceGroupManager" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.instanceGroupManagers.insert", - "path": "{project}/zones/{zone}/instanceGroupManagers", - "httpMethod": "POST", - "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. 
You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where you want to create the managed instance group.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone" - ], - "request": { - "$ref": "InstanceGroupManager" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.instanceGroupManagers.list", - "path": "{project}/zones/{zone}/instanceGroupManagers", - "httpMethod": "GET", - "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the managed instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone" - ], - "response": { - "$ref": "InstanceGroupManagerList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "listManagedInstances": { - "id": "compute.instanceGroupManagers.listManagedInstances", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", - "httpMethod": "POST", - "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", - "parameters": { - "filter": { - "type": "string", - "location": "query" - }, - "instanceGroupManager": { - "type": "string", - "description": "The name of the managed instance group.", - "required": true, - "location": "path" - }, - "maxResults": { - "type": "integer", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "order_by": { - "type": "string", - "location": "query" - }, - "pageToken": { - "type": "string", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the managed instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroupManager" - ], - "response": { - "$ref": "InstanceGroupManagersListManagedInstancesResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "id": "compute.instanceGroupManagers.patch", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", - "httpMethod": "PATCH", - "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. 
This method supports patch semantics.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the instance group manager.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where you want to create the managed instance group.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroupManager" - ], - "request": { - "$ref": "InstanceGroupManager" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "recreateInstances": { - "id": "compute.instanceGroupManagers.recreateInstances", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", - "httpMethod": "POST", - "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nYou can specify a maximum of 1000 instances with this method per request.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the managed instance group.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the managed instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroupManager" - ], - "request": { - "$ref": "InstanceGroupManagersRecreateInstancesRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "resize": { - "id": "compute.instanceGroupManagers.resize", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", - "httpMethod": "POST", - "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. 
You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the managed instance group.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "size": { - "type": "integer", - "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", - "required": true, - "format": "int32", - "location": "query" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the managed instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroupManager", - "size" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "resizeAdvanced": { - "id": "compute.instanceGroupManagers.resizeAdvanced", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", - "httpMethod": "POST", - "description": "Resizes the managed instance group with advanced configuration options like disabling creation retries. This is an extended version of the resize method.\n\nIf you increase the size of the instance group, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. 
You must separately verify the status of the creating, creatingWithoutRetries, or deleting actions with the get or listmanagedinstances method.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the managed instance group.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the managed instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroupManager" - ], - "request": { - "$ref": "InstanceGroupManagersResizeAdvancedRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setAutoHealingPolicies": { - "id": "compute.instanceGroupManagers.setAutoHealingPolicies", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", - "httpMethod": "POST", - "description": "Modifies the autohealing policies.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the instance group manager.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the managed instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroupManager" - ], - "request": { - "$ref": "InstanceGroupManagersSetAutoHealingRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setInstanceTemplate": { - "id": "compute.instanceGroupManagers.setInstanceTemplate", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", - "httpMethod": "POST", - "description": "Specifies the instance template to use when creating new instances in this group. 
The templates for existing instances in the group do not change unless you recreate them.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the managed instance group.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the managed instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroupManager" - ], - "request": { - "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setTargetPools": { - "id": "compute.instanceGroupManagers.setTargetPools", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", - "httpMethod": "POST", - "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the managed instance group.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the managed instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroupManager" - ], - "request": { - "$ref": "InstanceGroupManagersSetTargetPoolsRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "id": "compute.instanceGroupManagers.testIamPermissions", - "path": "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": 
[ - "project", - "zone", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "update": { - "id": "compute.instanceGroupManagers.update", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", - "httpMethod": "PUT", - "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listManagedInstances method.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the instance group manager.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where you want to create the managed instance group.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroupManager" - ], - "request": { - "$ref": "InstanceGroupManager" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "instanceGroups": { - "methods": { - "addInstances": { - "id": "compute.instanceGroups.addInstances", - "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", - "httpMethod": "POST", - "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information.", - "parameters": { - "instanceGroup": { - "type": "string", - "description": "The name of the instance group where you are adding instances.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroup" - ], - "request": { - "$ref": "InstanceGroupsAddInstancesRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "aggregatedList": { - "id": "compute.instanceGroups.aggregatedList", - "path": "{project}/aggregated/instanceGroups", - "httpMethod": "GET", - "description": "Retrieves the list of instance groups and sorts them by zone.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "InstanceGroupAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "delete": { - "id": "compute.instanceGroups.delete", - "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", - "httpMethod": "DELETE", - "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. 
Read Deleting an instance group for more information.", - "parameters": { - "instanceGroup": { - "type": "string", - "description": "The name of the instance group to delete.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroup" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.instanceGroups.get", - "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", - "httpMethod": "GET", - "description": "Returns the specified instance group. Get a list of available instance groups by making a list() request.", - "parameters": { - "instanceGroup": { - "type": "string", - "description": "The name of the instance group.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroup" - ], - "response": { - "$ref": "InstanceGroup" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.instanceGroups.insert", - "path": "{project}/zones/{zone}/instanceGroups", - "httpMethod": "POST", - "description": "Creates an instance group in the specified project using the parameters that are included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where you want to create the instance group.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone" - ], - "request": { - "$ref": "InstanceGroup" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.instanceGroups.list", - "path": "{project}/zones/{zone}/instanceGroups", - "httpMethod": "GET", - "description": "Retrieves the list of instance groups that are located in the specified project and zone.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. 
Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone" - ], - "response": { - "$ref": "InstanceGroupList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "listInstances": { - "id": "compute.instanceGroups.listInstances", - "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", - "httpMethod": "POST", - "description": "Lists the instances in the specified instance group.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "instanceGroup": { - "type": "string", - "description": "The name of the instance group from which you want to generate a list of included instances.", - "required": true, - "location": "path" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroup" - ], - "request": { - "$ref": "InstanceGroupsListInstancesRequest" - }, - "response": { - "$ref": "InstanceGroupsListInstances" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "removeInstances": { - "id": "compute.instanceGroups.removeInstances", - "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", - "httpMethod": "POST", - "description": "Removes one or more instances from the specified instance group, but does not delete those instances.", - "parameters": { - "instanceGroup": { - "type": "string", - "description": "The name of the instance group where the specified instances will be removed.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroup" - ], - "request": { - "$ref": "InstanceGroupsRemoveInstancesRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setNamedPorts": { - "id": "compute.instanceGroups.setNamedPorts", - "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", - "httpMethod": "POST", - "description": "Sets the named ports for the specified instance group.", - "parameters": { - "instanceGroup": { - "type": "string", - "description": "The name of the instance group where the named ports are updated.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroup" - ], - "request": { - "$ref": "InstanceGroupsSetNamedPortsRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "id": "compute.instanceGroups.testIamPermissions", - "path": "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions 
that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "instanceTemplates": { - "methods": { - "delete": { - "id": "compute.instanceTemplates.delete", - "path": "{project}/global/instanceTemplates/{instanceTemplate}", - "httpMethod": "DELETE", - "description": "Deletes the specified instance template. If you delete an instance template that is being referenced from another instance group, the instance group will not be able to create or recreate virtual machine instances. Deleting an instance template is permanent and cannot be undone.", - "parameters": { - "instanceTemplate": { - "type": "string", - "description": "The name of the instance template to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "instanceTemplate" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.instanceTemplates.get", - "path": "{project}/global/instanceTemplates/{instanceTemplate}", - "httpMethod": "GET", - "description": "Returns the specified instance template. 
Get a list of available instance templates by making a list() request.", - "parameters": { - "instanceTemplate": { - "type": "string", - "description": "The name of the instance template.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "instanceTemplate" - ], - "response": { - "$ref": "InstanceTemplate" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.instanceTemplates.insert", - "path": "{project}/global/instanceTemplates", - "httpMethod": "POST", - "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "InstanceTemplate" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.instanceTemplates.list", - "path": "{project}/global/instanceTemplates", - "httpMethod": "GET", - "description": "Retrieves a list of instance templates that are contained within the specified project and zone.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "InstanceTemplateList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "id": "compute.instanceTemplates.testIamPermissions", - "path": "{project}/global/instanceTemplates/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "instances": { - "methods": { - "addAccessConfig": { - "id": "compute.instances.addAccessConfig", - "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", - "httpMethod": "POST", - "description": "Adds an access config to an instance's network interface.", - "parameters": { - "instance": { - "type": "string", - "description": "The instance name for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - 
"networkInterface": { - "type": "string", - "description": "The name of the network interface to add to this instance.", - "required": true, - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance", - "networkInterface" - ], - "request": { - "$ref": "AccessConfig" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "aggregatedList": { - "id": "compute.instances.aggregatedList", - "path": "{project}/aggregated/instances", - "httpMethod": "GET", - "description": "Retrieves aggregated list of instances.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "InstanceAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "attachDisk": { - "id": "compute.instances.attachDisk", - "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", - "httpMethod": "POST", - "description": "Attaches a Disk resource to an instance.", - "parameters": { - "instance": { - "type": "string", - "description": "The instance name for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "request": { - "$ref": "AttachedDisk" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "delete": { - "id": "compute.instances.delete", - "path": "{project}/zones/{zone}/instances/{instance}", - "httpMethod": "DELETE", - "description": "Deletes the specified Instance resource. 
For more information, see Stopping or Deleting an Instance.", - "parameters": { - "instance": { - "type": "string", - "description": "Name of the instance resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "deleteAccessConfig": { - "id": "compute.instances.deleteAccessConfig", - "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", - "httpMethod": "POST", - "description": "Deletes an access config from an instance's network interface.", - "parameters": { - "accessConfig": { - "type": "string", - "description": "The name of the access config to delete.", - "required": true, - "location": "query" - }, - "instance": { - "type": "string", - "description": "The instance name for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "networkInterface": { - "type": "string", - "description": "The name of the network interface.", - "required": true, - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance", - "accessConfig", - "networkInterface" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "detachDisk": { - "id": "compute.instances.detachDisk", - "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", - "httpMethod": "POST", - "description": "Detaches a disk from an instance.", - "parameters": { - "deviceName": { - "type": "string", - "description": "Disk device name to detach.", - "required": true, - "location": "query" - }, - "instance": { - "type": "string", - "description": "Instance name.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance", - "deviceName" - ], - "response": { - "$ref": "Operation" - }, - 
"scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.instances.get", - "path": "{project}/zones/{zone}/instances/{instance}", - "httpMethod": "GET", - "description": "Returns the specified Instance resource. Get a list of available instances by making a list() request.", - "parameters": { - "instance": { - "type": "string", - "description": "Name of the instance resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "response": { - "$ref": "Instance" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "getSerialPortOutput": { - "id": "compute.instances.getSerialPortOutput", - "path": "{project}/zones/{zone}/instances/{instance}/serialPort", - "httpMethod": "GET", - "description": "Returns the specified instance's serial port output.", - "parameters": { - "instance": { - "type": "string", - "description": "Name of the instance scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "port": { - "type": "integer", - "description": "Specifies which COM or serial port to retrieve data from.", - "default": "1", - "format": "int32", - "minimum": "1", - "maximum": "4", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "start": { - "type": "string", - "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. 
For subsequent calls, this field should be set to the next value returned in the previous call.", - "format": "int64", - "location": "query" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "response": { - "$ref": "SerialPortOutput" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.instances.insert", - "path": "{project}/zones/{zone}/instances", - "httpMethod": "POST", - "description": "Creates an instance resource in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone" - ], - "request": { - "$ref": "Instance" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.instances.list", - "path": "{project}/zones/{zone}/instances", - "httpMethod": "GET", - "description": "Retrieves the list of instances contained within the specified zone.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. 
Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone" - ], - "response": { - "$ref": "InstanceList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "listReferrers": { - "id": "compute.instances.listReferrers", - "path": "{project}/zones/{zone}/instances/{instance}/referrers", - "httpMethod": "GET", - "description": "Retrieves the list of referrers to instances contained within the specified zone.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "instance": { - "type": "string", - "description": "Name of the target instance scoping this request, or '-' if the request should span over all instances in the container.", - "required": true, - "pattern": "-|[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "response": { - "$ref": "InstanceListReferrers" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "reset": { - "id": "compute.instances.reset", - "path": "{project}/zones/{zone}/instances/{instance}/reset", - "httpMethod": "POST", - "description": "Performs a reset on the instance. 
For more information, see Resetting an instance.", - "parameters": { - "instance": { - "type": "string", - "description": "Name of the instance scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setDiskAutoDelete": { - "id": "compute.instances.setDiskAutoDelete", - "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", - "httpMethod": "POST", - "description": "Sets the auto-delete flag for a disk attached to an instance.", - "parameters": { - "autoDelete": { - "type": "boolean", - "description": "Whether to auto-delete the disk when the instance is deleted.", - "required": true, - "location": "query" - }, - "deviceName": { - "type": "string", - "description": "The device name of the disk to modify.", - "required": true, - "pattern": "\\w[\\w.-]{0,254}", - "location": "query" - }, - "instance": { - "type": "string", - "description": "The instance name.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance", - "autoDelete", - "deviceName" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setLabels": { - "id": "compute.instances.setLabels", - "path": "{project}/zones/{zone}/instances/{instance}/setLabels", - "httpMethod": "POST", - "description": "Sets labels on an instance. 
To learn more about labels, read the Labeling or Tagging Resources documentation.", - "parameters": { - "instance": { - "type": "string", - "description": "Name of the instance scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "request": { - "$ref": "InstancesSetLabelsRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setMachineResources": { - "id": "compute.instances.setMachineResources", - "path": "{project}/zones/{zone}/instances/{instance}/setMachineResources", - "httpMethod": "POST", - "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", - "parameters": { - "instance": { - "type": "string", - "description": "Name of the instance scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "request": { - "$ref": "InstancesSetMachineResourcesRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setMachineType": { - "id": "compute.instances.setMachineType", - "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", - "httpMethod": "POST", - "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", - "parameters": { - "instance": { - "type": "string", - "description": "Name of the instance scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "request": { - "$ref": "InstancesSetMachineTypeRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - 
"setMetadata": { - "id": "compute.instances.setMetadata", - "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", - "httpMethod": "POST", - "description": "Sets metadata for the specified instance to the data included in the request.", - "parameters": { - "instance": { - "type": "string", - "description": "Name of the instance scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "request": { - "$ref": "Metadata" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setScheduling": { - "id": "compute.instances.setScheduling", - "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", - "httpMethod": "POST", - "description": "Sets an instance's scheduling options.", - "parameters": { - "instance": { - "type": "string", - "description": "Instance name.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "request": { - "$ref": "Scheduling" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setServiceAccount": { - "id": "compute.instances.setServiceAccount", - "path": "{project}/zones/{zone}/instances/{instance}/setServiceAccount", - "httpMethod": "POST", - "description": "Sets the service account on the instance. 
For more information, read Changing the service account and access scopes for an instance.", - "parameters": { - "instance": { - "type": "string", - "description": "Name of the instance resource to start.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "request": { - "$ref": "InstancesSetServiceAccountRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setTags": { - "id": "compute.instances.setTags", - "path": "{project}/zones/{zone}/instances/{instance}/setTags", - "httpMethod": "POST", - "description": "Sets tags for the specified instance to the data included in the request.", - "parameters": { - "instance": { - "type": "string", - "description": "Name of the instance scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "request": { - "$ref": "Tags" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "start": { - "id": "compute.instances.start", - "path": "{project}/zones/{zone}/instances/{instance}/start", - "httpMethod": "POST", - "description": "Starts an instance that was stopped using the instances().stop method. 
For more information, see Restart an instance.", - "parameters": { - "instance": { - "type": "string", - "description": "Name of the instance resource to start.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "startWithEncryptionKey": { - "id": "compute.instances.startWithEncryptionKey", - "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", - "httpMethod": "POST", - "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", - "parameters": { - "instance": { - "type": "string", - "description": "Name of the instance resource to start.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "request": { - "$ref": "InstancesStartWithEncryptionKeyRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "stop": { - "id": "compute.instances.stop", - "path": "{project}/zones/{zone}/instances/{instance}/stop", - "httpMethod": "POST", - "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. 
For more information, see Stopping an instance.", - "parameters": { - "instance": { - "type": "string", - "description": "Name of the instance resource to stop.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "id": "compute.instances.testIamPermissions", - "path": "{project}/zones/{zone}/instances/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "licenses": { - "methods": { - "get": { - "id": "compute.licenses.get", - "path": "{project}/global/licenses/{license}", - "httpMethod": "GET", - "description": "Returns the specified License resource. 
Get a list of available licenses by making a list() request.", - "parameters": { - "license": { - "type": "string", - "description": "Name of the License resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "license" - ], - "response": { - "$ref": "License" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "machineTypes": { - "methods": { - "aggregatedList": { - "id": "compute.machineTypes.aggregatedList", - "path": "{project}/aggregated/machineTypes", - "httpMethod": "GET", - "description": "Retrieves an aggregated list of machine types.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "MachineTypeAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "get": { - "id": "compute.machineTypes.get", - "path": "{project}/zones/{zone}/machineTypes/{machineType}", - "httpMethod": "GET", - "description": "Returns the specified machine type. Get a list of available machine types by making a list() request.", - "parameters": { - "machineType": { - "type": "string", - "description": "Name of the machine type to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "machineType" - ], - "response": { - "$ref": "MachineType" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "list": { - "id": "compute.machineTypes.list", - "path": "{project}/zones/{zone}/machineTypes", - "httpMethod": "GET", - "description": "Retrieves a list of machine types available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone" - ], - "response": { - "$ref": "MachineTypeList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "networks": { - "methods": { - "addPeering": { - "id": "compute.networks.addPeering", - "path": "{project}/global/networks/{network}/addPeering", - "httpMethod": "POST", - "description": "Adds a peering to the specified network.", - "parameters": { - "network": { - "type": "string", - "description": "Name of the network resource to add peering to.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "network" - ], - "request": { - "$ref": "NetworksAddPeeringRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "delete": { - "id": "compute.networks.delete", - "path": "{project}/global/networks/{network}", - 
"httpMethod": "DELETE", - "description": "Deletes the specified network.", - "parameters": { - "network": { - "type": "string", - "description": "Name of the network to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "network" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.networks.get", - "path": "{project}/global/networks/{network}", - "httpMethod": "GET", - "description": "Returns the specified network. Get a list of available networks by making a list() request.", - "parameters": { - "network": { - "type": "string", - "description": "Name of the network to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "network" - ], - "response": { - "$ref": "Network" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.networks.insert", - "path": "{project}/global/networks", - "httpMethod": "POST", - "description": "Creates a network in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "Network" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.networks.list", - "path": "{project}/global/networks", - "httpMethod": "GET", - "description": "Retrieves the list of networks available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "NetworkList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "removePeering": { - "id": "compute.networks.removePeering", - "path": "{project}/global/networks/{network}/removePeering", - "httpMethod": "POST", - "description": "Removes a peering from the specified network.", - "parameters": { - "network": { - "type": "string", - "description": "Name of the network resource to remove peering from.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "network" - ], - "request": { - "$ref": "NetworksRemovePeeringRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "switchToCustomMode": { - "id": "compute.networks.switchToCustomMode", - "path": "{project}/global/networks/{network}/switchToCustomMode", - "httpMethod": "POST", - "description": "Switches the network mode from auto subnet mode to custom subnet mode.", - "parameters": { - "network": { - "type": "string", - "description": "Name of the network to be updated.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "network" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "id": "compute.networks.testIamPermissions", - "path": "{project}/global/networks/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - 
"https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "projects": { - "methods": { - "disableXpnHost": { - "id": "compute.projects.disableXpnHost", - "path": "{project}/disableXpnHost", - "httpMethod": "POST", - "description": "Disable this project as an XPN host project.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "disableXpnResource": { - "id": "compute.projects.disableXpnResource", - "path": "{project}/disableXpnResource", - "httpMethod": "POST", - "description": "Disable an XPN resource associated with this host project.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "ProjectsDisableXpnResourceRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "enableXpnHost": { - "id": "compute.projects.enableXpnHost", - "path": "{project}/enableXpnHost", - "httpMethod": "POST", - "description": "Enable this project as an XPN host project.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "enableXpnResource": { - "id": "compute.projects.enableXpnResource", - "path": "{project}/enableXpnResource", - "httpMethod": "POST", - "description": "Enable XPN resource (a.k.a service project or service folder in the future) for a host project, so that subnetworks in the host project can be used by instances in the service project or folder.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "ProjectsEnableXpnResourceRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.projects.get", - "path": "{project}", - "httpMethod": "GET", - "description": "Returns the specified Project resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "Project" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "getXpnHost": { - "id": "compute.projects.getXpnHost", - "path": "{project}/getXpnHost", - "httpMethod": "GET", - "description": "Get the XPN host project that this project links to. May be empty if no link exists.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "Project" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "getXpnResources": { - "id": "compute.projects.getXpnResources", - "path": "{project}/getXpnResources", - "httpMethod": "GET", - "description": "Get XPN resources associated with this host project.", - "parameters": { - "filter": { - "type": "string", - "location": "query" - }, - "maxResults": { - "type": "integer", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "order_by": { - "type": "string", - "location": "query" - }, - "pageToken": { - "type": "string", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "ProjectsGetXpnResources" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "listXpnHosts": { - "id": "compute.projects.listXpnHosts", - "path": "{project}/listXpnHosts", - "httpMethod": "POST", - "description": "List all XPN host projects visible to the user in an organization.", - "parameters": { - "filter": { - "type": "string", - "location": "query" - }, - "maxResults": { - "type": "integer", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "order_by": { - "type": "string", - "location": "query" - }, - "pageToken": { - "type": "string", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "ProjectsListXpnHostsRequest" - }, - "response": { - "$ref": "XpnHostList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "moveDisk": { - "id": "compute.projects.moveDisk", - "path": "{project}/moveDisk", - "httpMethod": "POST", - "description": "Moves a persistent disk from one zone to 
another.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "DiskMoveRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "moveInstance": { - "id": "compute.projects.moveInstance", - "path": "{project}/moveInstance", - "httpMethod": "POST", - "description": "Moves an instance and its attached persistent disks from one zone to another.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "InstanceMoveRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setCommonInstanceMetadata": { - "id": "compute.projects.setCommonInstanceMetadata", - "path": "{project}/setCommonInstanceMetadata", - "httpMethod": "POST", - "description": "Sets metadata common to all instances within the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "Metadata" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setUsageExportBucket": { - "id": "compute.projects.setUsageExportBucket", - "path": "{project}/setUsageExportBucket", - "httpMethod": "POST", - "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. 
If you provide an empty request body using this method, the usage export feature will be disabled.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "UsageExportLocation" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - } - } - }, - "regionAutoscalers": { - "methods": { - "delete": { - "id": "compute.regionAutoscalers.delete", - "path": "{project}/regions/{region}/autoscalers/{autoscaler}", - "httpMethod": "DELETE", - "description": "Deletes the specified autoscaler.", - "parameters": { - "autoscaler": { - "type": "string", - "description": "Name of the autoscaler to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "autoscaler" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.regionAutoscalers.get", - "path": "{project}/regions/{region}/autoscalers/{autoscaler}", - "httpMethod": "GET", - "description": "Returns the specified autoscaler.", - "parameters": { - "autoscaler": { - "type": "string", - "description": "Name of the autoscaler to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "autoscaler" - ], - "response": { - "$ref": "Autoscaler" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.regionAutoscalers.insert", - "path": "{project}/regions/{region}/autoscalers", - "httpMethod": "POST", - "description": "Creates an autoscaler in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "request": { - "$ref": "Autoscaler" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.regionAutoscalers.list", - "path": "{project}/regions/{region}/autoscalers", - "httpMethod": "GET", - "description": "Retrieves a list of autoscalers contained within the specified region.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "response": { - "$ref": "RegionAutoscalerList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "id": "compute.regionAutoscalers.patch", - "path": "{project}/regions/{region}/autoscalers", - "httpMethod": "PATCH", - "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports patch semantics.", - "parameters": { - "autoscaler": { - "type": "string", - "description": "Name of the autoscaler to patch.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "request": { - "$ref": "Autoscaler" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "id": "compute.regionAutoscalers.testIamPermissions", - "path": "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "The name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "update": { - "id": "compute.regionAutoscalers.update", - "path": "{project}/regions/{region}/autoscalers", - "httpMethod": "PUT", - 
"description": "Updates an autoscaler in the specified project using the data included in the request.", - "parameters": { - "autoscaler": { - "type": "string", - "description": "Name of the autoscaler to update.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "request": { - "$ref": "Autoscaler" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "regionBackendServices": { - "methods": { - "delete": { - "id": "compute.regionBackendServices.delete", - "path": "{project}/regions/{region}/backendServices/{backendService}", - "httpMethod": "DELETE", - "description": "Deletes the specified regional BackendService resource.", - "parameters": { - "backendService": { - "type": "string", - "description": "Name of the BackendService resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "backendService" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.regionBackendServices.get", - "path": "{project}/regions/{region}/backendServices/{backendService}", - "httpMethod": "GET", - "description": "Returns the specified regional BackendService resource.", - "parameters": { - "backendService": { - "type": "string", - "description": "Name of the BackendService resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "backendService" - ], - "response": { - "$ref": "BackendService" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "getHealth": { - "id": "compute.regionBackendServices.getHealth", - "path": 
"{project}/regions/{region}/backendServices/{backendService}/getHealth", - "httpMethod": "POST", - "description": "Gets the most recent health check results for this regional BackendService.", - "parameters": { - "backendService": { - "type": "string", - "description": "Name of the BackendService resource to which the queried instance belongs.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "backendService" - ], - "request": { - "$ref": "ResourceGroupReference" - }, - "response": { - "$ref": "BackendServiceGroupHealth" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.regionBackendServices.insert", - "path": "{project}/regions/{region}/backendServices", - "httpMethod": "POST", - "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Restrictions and Guidelines for more information.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "request": { - "$ref": "BackendService" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.regionBackendServices.list", - "path": "{project}/regions/{region}/backendServices", - "httpMethod": "GET", - "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "response": { - "$ref": "BackendServiceList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "id": "compute.regionBackendServices.patch", - "path": "{project}/regions/{region}/backendServices/{backendService}", - "httpMethod": "PATCH", - "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. 
This method supports patch semantics.", - "parameters": { - "backendService": { - "type": "string", - "description": "Name of the BackendService resource to patch.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "backendService" - ], - "request": { - "$ref": "BackendService" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "id": "compute.regionBackendServices.testIamPermissions", - "path": "{project}/regions/{region}/backendServices/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "The name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "update": { - "id": "compute.regionBackendServices.update", - "path": "{project}/regions/{region}/backendServices/{backendService}", - "httpMethod": "PUT", - "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. 
Read Restrictions and Guidelines for more information.", - "parameters": { - "backendService": { - "type": "string", - "description": "Name of the BackendService resource to update.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "backendService" - ], - "request": { - "$ref": "BackendService" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "regionCommitments": { - "methods": { - "aggregatedList": { - "id": "compute.regionCommitments.aggregatedList", - "path": "{project}/aggregated/commitments", - "httpMethod": "GET", - "description": "Retrieves an aggregated list of commitments.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "CommitmentAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "get": { - "id": "compute.regionCommitments.get", - "path": "{project}/regions/{region}/commitments/{commitment}", - "httpMethod": "GET", - "description": "Returns the specified commitment resource. Get a list of available commitments by making a list() request.", - "parameters": { - "commitment": { - "type": "string", - "description": "Name of the commitment to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "commitment" - ], - "response": { - "$ref": "Commitment" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.regionCommitments.insert", - "path": "{project}/regions/{region}/commitments", - "httpMethod": "POST", - "description": "Creates an commitment in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "request": { - "$ref": "Commitment" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.regionCommitments.list", - "path": "{project}/regions/{region}/commitments", - "httpMethod": "GET", - "description": "Retrieves a list of commitments contained within the specified region.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets 
a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "response": { - "$ref": "CommitmentList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "regionInstanceGroupManagers": { - "methods": { - "abandonInstances": { - "id": "compute.regionInstanceGroupManagers.abandonInstances", - "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", - "httpMethod": "POST", - "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nYou can specify a maximum of 1000 instances with this method per request.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "Name of the managed instance group.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "instanceGroupManager" - ], - "request": { - "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "delete": { - "id": "compute.regionInstanceGroupManagers.delete", - "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - "httpMethod": "DELETE", - "description": "Deletes the specified managed instance group and all of the instances in that group.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "Name of the managed instance group to delete.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - 
"required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "instanceGroupManager" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "deleteInstances": { - "id": "compute.regionInstanceGroupManagers.deleteInstances", - "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", - "httpMethod": "POST", - "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nYou can specify a maximum of 1000 instances with this method per request.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "Name of the managed instance group.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "instanceGroupManager" - ], - "request": { - "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.regionInstanceGroupManagers.get", - "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - "httpMethod": "GET", - "description": "Returns all of the details about the specified managed instance group.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "Name of the managed instance group to return.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "instanceGroupManager" - ], - "response": { - "$ref": "InstanceGroupManager" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.regionInstanceGroupManagers.insert", - "path": "{project}/regions/{region}/instanceGroupManagers", - "httpMethod": "POST", - "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. 
This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "request": { - "$ref": "InstanceGroupManager" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.regionInstanceGroupManagers.list", - "path": "{project}/regions/{region}/instanceGroupManagers", - "httpMethod": "GET", - "description": "Retrieves the list of managed instance groups that are contained within the specified region.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "response": { - "$ref": "RegionInstanceGroupManagerList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "listManagedInstances": { - "id": "compute.regionInstanceGroupManagers.listManagedInstances", - "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", - "httpMethod": "POST", - "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances.", - "parameters": { - "filter": { - "type": "string", - "location": "query" - }, - "instanceGroupManager": { - "type": "string", - "description": "The name of the managed instance group.", - "required": true, - "location": "path" - }, - "maxResults": { - "type": "integer", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "order_by": { - "type": "string", - "location": "query" - }, - "pageToken": { - "type": "string", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "instanceGroupManager" - ], - "response": { - "$ref": "RegionInstanceGroupManagersListInstancesResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "id": "compute.regionInstanceGroupManagers.patch", - "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - "httpMethod": "PATCH", - "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. 
This method supports patch semantics.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the instance group manager.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "instanceGroupManager" - ], - "request": { - "$ref": "InstanceGroupManager" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "recreateInstances": { - "id": "compute.regionInstanceGroupManagers.recreateInstances", - "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", - "httpMethod": "POST", - "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nYou can specify a maximum of 1000 instances with this method per request.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "Name of the managed instance group.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "instanceGroupManager" - ], - "request": { - "$ref": "RegionInstanceGroupManagersRecreateRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "resize": { - "id": "compute.regionInstanceGroupManagers.resize", - "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", - "httpMethod": "POST", - "description": "Changes the intended size for the managed instance group. If you increase the size, the group schedules actions to create new instances using the current instance template. If you decrease the size, the group schedules delete actions on one or more instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. 
You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "Name of the managed instance group.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "location": "path" - }, - "size": { - "type": "integer", - "description": "Number of instances that should exist in this instance group manager.", - "required": true, - "format": "int32", - "minimum": "0", - "location": "query" - } - }, - "parameterOrder": [ - "project", - "region", - "instanceGroupManager", - "size" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setAutoHealingPolicies": { - "id": "compute.regionInstanceGroupManagers.setAutoHealingPolicies", - "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", - "httpMethod": "POST", - "description": "Modifies the autohealing policy for the instances in this managed instance group.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "Name of the managed instance group.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "instanceGroupManager" - ], - "request": { - "$ref": "RegionInstanceGroupManagersSetAutoHealingRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setInstanceTemplate": { - "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", - "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", - "httpMethod": "POST", - "description": "Sets the instance template to use when creating new instances or recreating instances in this group. 
Existing instances are not affected.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the managed instance group.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "instanceGroupManager" - ], - "request": { - "$ref": "RegionInstanceGroupManagersSetTemplateRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setTargetPools": { - "id": "compute.regionInstanceGroupManagers.setTargetPools", - "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", - "httpMethod": "POST", - "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "Name of the managed instance group.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "instanceGroupManager" - ], - "request": { - "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "id": "compute.regionInstanceGroupManagers.testIamPermissions", - "path": "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "The name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "update": { - "id": 
"compute.regionInstanceGroupManagers.update", - "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - "httpMethod": "PUT", - "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listmanagedinstances method.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the instance group manager.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "instanceGroupManager" - ], - "request": { - "$ref": "InstanceGroupManager" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "regionInstanceGroups": { - "methods": { - "get": { - "id": "compute.regionInstanceGroups.get", - "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}", - "httpMethod": "GET", - "description": "Returns the specified instance group resource.", - "parameters": { - "instanceGroup": { - "type": "string", - "description": "Name of the instance group resource to return.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "instanceGroup" - ], - "response": { - "$ref": "InstanceGroup" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "list": { - "id": "compute.regionInstanceGroups.list", - "path": "{project}/regions/{region}/instanceGroups", - "httpMethod": "GET", - "description": "Retrieves the list of instance group resources contained within the specified region.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "response": { - "$ref": "RegionInstanceGroupList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "listInstances": { - "id": "compute.regionInstanceGroups.listInstances", - "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", - "httpMethod": "POST", - "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "instanceGroup": { - "type": "string", - "description": "Name of the regional instance group for which we want to list the instances.", - "required": true, - "location": "path" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "instanceGroup" - ], - "request": { - "$ref": "RegionInstanceGroupsListInstancesRequest" - }, - "response": { - "$ref": "RegionInstanceGroupsListInstances" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "setNamedPorts": { - "id": "compute.regionInstanceGroups.setNamedPorts", - "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", - "httpMethod": "POST", - "description": "Sets the named ports for the specified regional instance group.", - "parameters": { - "instanceGroup": { - "type": "string", - "description": "The name of the regional instance group where the named ports are updated.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "instanceGroup" - ], - "request": { - "$ref": "RegionInstanceGroupsSetNamedPortsRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "id": "compute.regionInstanceGroups.testIamPermissions", - "path": "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "The name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "regionOperations": { - "methods": { - "delete": { - "id": "compute.regionOperations.delete", - "path": 
"{project}/regions/{region}/operations/{operation}", - "httpMethod": "DELETE", - "description": "Deletes the specified region-specific Operations resource.", - "parameters": { - "operation": { - "type": "string", - "description": "Name of the Operations resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "operation" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.regionOperations.get", - "path": "{project}/regions/{region}/operations/{operation}", - "httpMethod": "GET", - "description": "Retrieves the specified region-specific Operations resource.", - "parameters": { - "operation": { - "type": "string", - "description": "Name of the Operations resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "operation" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "list": { - "id": "compute.regionOperations.list", - "path": "{project}/regions/{region}/operations", - "httpMethod": "GET", - "description": "Retrieves a list of Operation resources contained within the specified region.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "response": { - "$ref": "OperationList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "regions": { - "methods": { - "get": { - "id": "compute.regions.get", - "path": "{project}/regions/{region}", - "httpMethod": "GET", - "description": "Returns the specified Region resource. 
Get a list of available regions by making a list() request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "response": { - "$ref": "Region" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "list": { - "id": "compute.regions.list", - "path": "{project}/regions", - "httpMethod": "GET", - "description": "Retrieves the list of region resources available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "RegionList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "routers": { - "methods": { - "aggregatedList": { - "id": "compute.routers.aggregatedList", - "path": "{project}/aggregated/routers", - "httpMethod": "GET", - "description": "Retrieves an aggregated list of routers.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "RouterAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "delete": { - "id": "compute.routers.delete", - "path": "{project}/regions/{region}/routers/{router}", - "httpMethod": "DELETE", - "description": "Deletes the specified Router resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "router": { - "type": "string", - "description": "Name of the Router resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "router" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.routers.get", - "path": "{project}/regions/{region}/routers/{router}", - "httpMethod": "GET", - "description": "Returns the specified Router resource. 
Get a list of available routers by making a list() request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "router": { - "type": "string", - "description": "Name of the Router resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "router" - ], - "response": { - "$ref": "Router" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "getRouterStatus": { - "id": "compute.routers.getRouterStatus", - "path": "{project}/regions/{region}/routers/{router}/getRouterStatus", - "httpMethod": "GET", - "description": "Retrieves runtime information of the specified router.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "router": { - "type": "string", - "description": "Name of the Router resource to query.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "router" - ], - "response": { - "$ref": "RouterStatusResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.routers.insert", - "path": "{project}/regions/{region}/routers", - "httpMethod": "POST", - "description": "Creates a Router resource in the specified project and region using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "request": { - "$ref": "Router" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.routers.list", - "path": "{project}/regions/{region}/routers", - "httpMethod": "GET", - "description": "Retrieves a list of Router resources available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "response": { - "$ref": "RouterList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "id": "compute.routers.patch", - "path": "{project}/regions/{region}/routers/{router}", - "httpMethod": "PATCH", - "description": "Patches the specified Router resource with the data included in the request. 
This method supports patch semantics.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "router": { - "type": "string", - "description": "Name of the Router resource to patch.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "router" - ], - "request": { - "$ref": "Router" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "preview": { - "id": "compute.routers.preview", - "path": "{project}/regions/{region}/routers/{router}/preview", - "httpMethod": "POST", - "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "router": { - "type": "string", - "description": "Name of the Router resource to query.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "router" - ], - "request": { - "$ref": "Router" - }, - "response": { - "$ref": "RoutersPreviewResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "id": "compute.routers.testIamPermissions", - "path": "{project}/regions/{region}/routers/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "The name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - 
"https://www.googleapis.com/auth/compute.readonly" - ] - }, - "update": { - "id": "compute.routers.update", - "path": "{project}/regions/{region}/routers/{router}", - "httpMethod": "PUT", - "description": "Updates the specified Router resource with the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "router": { - "type": "string", - "description": "Name of the Router resource to update.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "router" - ], - "request": { - "$ref": "Router" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "routes": { - "methods": { - "delete": { - "id": "compute.routes.delete", - "path": "{project}/global/routes/{route}", - "httpMethod": "DELETE", - "description": "Deletes the specified Route resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "route": { - "type": "string", - "description": "Name of the Route resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "route" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.routes.get", - "path": "{project}/global/routes/{route}", - "httpMethod": "GET", - "description": "Returns the specified Route resource. 
Get a list of available routes by making a list() request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "route": { - "type": "string", - "description": "Name of the Route resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "route" - ], - "response": { - "$ref": "Route" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.routes.insert", - "path": "{project}/global/routes", - "httpMethod": "POST", - "description": "Creates a Route resource in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "Route" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.routes.list", - "path": "{project}/global/routes", - "httpMethod": "GET", - "description": "Retrieves the list of Route resources available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "RouteList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "id": "compute.routes.testIamPermissions", - "path": "{project}/global/routes/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "snapshots": { - "methods": { - "delete": { - "id": "compute.snapshots.delete", - "path": "{project}/global/snapshots/{snapshot}", - "httpMethod": "DELETE", - "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. 
If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snaphots.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "snapshot": { - "type": "string", - "description": "Name of the Snapshot resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "snapshot" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.snapshots.get", - "path": "{project}/global/snapshots/{snapshot}", - "httpMethod": "GET", - "description": "Returns the specified Snapshot resource. Get a list of available snapshots by making a list() request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "snapshot": { - "type": "string", - "description": "Name of the Snapshot resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "snapshot" - ], - "response": { - "$ref": "Snapshot" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "list": { - "id": "compute.snapshots.list", - "path": "{project}/global/snapshots", - "httpMethod": "GET", - "description": "Retrieves the list of Snapshot resources contained within the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "SnapshotList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "setLabels": { - "id": "compute.snapshots.setLabels", - "path": "{project}/global/snapshots/{resource}/setLabels", - "httpMethod": "POST", - "description": "Sets the labels on a snapshot. 
To learn more about labels, read the Labeling or Tagging Resources documentation.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "GlobalSetLabelsRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "id": "compute.snapshots.testIamPermissions", - "path": "{project}/global/snapshots/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "sslCertificates": { - "methods": { - "delete": { - "id": "compute.sslCertificates.delete", - "path": "{project}/global/sslCertificates/{sslCertificate}", - "httpMethod": "DELETE", - "description": "Deletes the specified SslCertificate resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "sslCertificate": { - "type": "string", - "description": "Name of the SslCertificate resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "sslCertificate" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.sslCertificates.get", - "path": "{project}/global/sslCertificates/{sslCertificate}", - "httpMethod": "GET", - "description": "Returns the specified SslCertificate resource. 
Get a list of available SSL certificates by making a list() request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "sslCertificate": { - "type": "string", - "description": "Name of the SslCertificate resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "sslCertificate" - ], - "response": { - "$ref": "SslCertificate" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.sslCertificates.insert", - "path": "{project}/global/sslCertificates", - "httpMethod": "POST", - "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "SslCertificate" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.sslCertificates.list", - "path": "{project}/global/sslCertificates", - "httpMethod": "GET", - "description": "Retrieves the list of SslCertificate resources available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "SslCertificateList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "id": "compute.sslCertificates.testIamPermissions", - "path": "{project}/global/sslCertificates/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "subnetworks": { - "methods": { - "aggregatedList": { - "id": "compute.subnetworks.aggregatedList", - "path": "{project}/aggregated/subnetworks", - "httpMethod": "GET", - "description": "Retrieves an aggregated list of subnetworks.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. 
The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "SubnetworkAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "delete": { - "id": "compute.subnetworks.delete", - "path": "{project}/regions/{region}/subnetworks/{subnetwork}", - "httpMethod": "DELETE", - "description": "Deletes the specified subnetwork.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "subnetwork": { - "type": "string", - "description": "Name of the Subnetwork resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "subnetwork" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "expandIpCidrRange": { - "id": "compute.subnetworks.expandIpCidrRange", - "path": "{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", - "httpMethod": "POST", - "description": "Expands the IP CIDR range of the subnetwork to a specified value.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "subnetwork": { - "type": "string", - "description": "Name of the Subnetwork resource to update.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "subnetwork" - ], - "request": { - "$ref": "SubnetworksExpandIpCidrRangeRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.subnetworks.get", - "path": "{project}/regions/{region}/subnetworks/{subnetwork}", - "httpMethod": "GET", - "description": "Returns the specified subnetwork. 
Get a list of available subnetworks list() request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "subnetwork": { - "type": "string", - "description": "Name of the Subnetwork resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "subnetwork" - ], - "response": { - "$ref": "Subnetwork" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "getIamPolicy": { - "id": "compute.subnetworks.getIamPolicy", - "path": "{project}/regions/{region}/subnetworks/{resource}/getIamPolicy", - "httpMethod": "GET", - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "The name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "resource" - ], - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.subnetworks.insert", - "path": "{project}/regions/{region}/subnetworks", - "httpMethod": "POST", - "description": "Creates a subnetwork in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "request": { - "$ref": "Subnetwork" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.subnetworks.list", - "path": "{project}/regions/{region}/subnetworks", - "httpMethod": "GET", - "description": "Retrieves a list of subnetworks available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed 
resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "response": { - "$ref": "SubnetworkList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "setIamPolicy": { - "id": "compute.subnetworks.setIamPolicy", - "path": "{project}/regions/{region}/subnetworks/{resource}/setIamPolicy", - "httpMethod": "POST", - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "The name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "resource" - ], - "request": { - "$ref": "Policy" - }, - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setPrivateIpGoogleAccess": { - "id": "compute.subnetworks.setPrivateIpGoogleAccess", - "path": "{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess", - "httpMethod": "POST", - "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Cloudpath.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "subnetwork": { - "type": "string", - "description": "Name of the Subnetwork resource.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "subnetwork" - ], - "request": { - "$ref": "SubnetworksSetPrivateIpGoogleAccessRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "id": "compute.subnetworks.testIamPermissions", - "path": "{project}/regions/{region}/subnetworks/{resource}/testIamPermissions", - 
"httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "The name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "targetHttpProxies": { - "methods": { - "delete": { - "id": "compute.targetHttpProxies.delete", - "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", - "httpMethod": "DELETE", - "description": "Deletes the specified TargetHttpProxy resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetHttpProxy": { - "type": "string", - "description": "Name of the TargetHttpProxy resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "targetHttpProxy" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.targetHttpProxies.get", - "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", - "httpMethod": "GET", - "description": "Returns the specified TargetHttpProxy resource. 
Get a list of available target HTTP proxies by making a list() request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetHttpProxy": { - "type": "string", - "description": "Name of the TargetHttpProxy resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "targetHttpProxy" - ], - "response": { - "$ref": "TargetHttpProxy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.targetHttpProxies.insert", - "path": "{project}/global/targetHttpProxies", - "httpMethod": "POST", - "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "TargetHttpProxy" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.targetHttpProxies.list", - "path": "{project}/global/targetHttpProxies", - "httpMethod": "GET", - "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "TargetHttpProxyList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "setUrlMap": { - "id": "compute.targetHttpProxies.setUrlMap", - "path": "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap", - "httpMethod": "POST", - "description": "Changes the URL map for TargetHttpProxy.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetHttpProxy": { - "type": "string", - "description": "Name of the TargetHttpProxy to set a URL map for.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "targetHttpProxy" - ], - "request": { - "$ref": "UrlMapReference" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "id": "compute.targetHttpProxies.testIamPermissions", - "path": "{project}/global/targetHttpProxies/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - 
"$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "targetHttpsProxies": { - "methods": { - "delete": { - "id": "compute.targetHttpsProxies.delete", - "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", - "httpMethod": "DELETE", - "description": "Deletes the specified TargetHttpsProxy resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetHttpsProxy": { - "type": "string", - "description": "Name of the TargetHttpsProxy resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "targetHttpsProxy" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.targetHttpsProxies.get", - "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", - "httpMethod": "GET", - "description": "Returns the specified TargetHttpsProxy resource. Get a list of available target HTTPS proxies by making a list() request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetHttpsProxy": { - "type": "string", - "description": "Name of the TargetHttpsProxy resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "targetHttpsProxy" - ], - "response": { - "$ref": "TargetHttpsProxy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.targetHttpsProxies.insert", - "path": "{project}/global/targetHttpsProxies", - "httpMethod": "POST", - "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "TargetHttpsProxy" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.targetHttpsProxies.list", - "path": "{project}/global/targetHttpsProxies", - "httpMethod": "GET", - "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "TargetHttpsProxyList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "setSslCertificates": { - "id": "compute.targetHttpsProxies.setSslCertificates", - "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", - "httpMethod": "POST", - "description": "Replaces SslCertificates for TargetHttpsProxy.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetHttpsProxy": { - "type": "string", - "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "targetHttpsProxy" - ], - "request": { - "$ref": "TargetHttpsProxiesSetSslCertificatesRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setUrlMap": { - "id": "compute.targetHttpsProxies.setUrlMap", - "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", - "httpMethod": "POST", - "description": "Changes the URL map for TargetHttpsProxy.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetHttpsProxy": { - "type": "string", - "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "targetHttpsProxy" - ], - "request": { - "$ref": "UrlMapReference" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "id": "compute.targetHttpsProxies.testIamPermissions", - "path": "{project}/global/targetHttpsProxies/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - 
"resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "targetInstances": { - "methods": { - "aggregatedList": { - "id": "compute.targetInstances.aggregatedList", - "path": "{project}/aggregated/targetInstances", - "httpMethod": "GET", - "description": "Retrieves an aggregated list of target instances.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "TargetInstanceAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "delete": { - "id": "compute.targetInstances.delete", - "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", - "httpMethod": "DELETE", - "description": "Deletes the specified TargetInstance resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetInstance": { - "type": "string", - "description": "Name of the TargetInstance resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "zone": { - "type": "string", - "description": "Name of the zone scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "targetInstance" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.targetInstances.get", - "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", - "httpMethod": "GET", - "description": "Returns the specified TargetInstance resource. 
Get a list of available target instances by making a list() request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetInstance": { - "type": "string", - "description": "Name of the TargetInstance resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "zone": { - "type": "string", - "description": "Name of the zone scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "targetInstance" - ], - "response": { - "$ref": "TargetInstance" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.targetInstances.insert", - "path": "{project}/zones/{zone}/targetInstances", - "httpMethod": "POST", - "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "Name of the zone scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone" - ], - "request": { - "$ref": "TargetInstance" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.targetInstances.list", - "path": "{project}/zones/{zone}/targetInstances", - "httpMethod": "GET", - "description": "Retrieves a list of TargetInstance resources available to the specified project and zone.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "Name of the zone scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone" - ], - "response": { - "$ref": "TargetInstanceList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "id": "compute.targetInstances.testIamPermissions", - "path": "{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "targetPools": { - 
"methods": { - "addHealthCheck": { - "id": "compute.targetPools.addHealthCheck", - "path": "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", - "httpMethod": "POST", - "description": "Adds health check URLs to a target pool.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "targetPool": { - "type": "string", - "description": "Name of the target pool to add a health check to.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "targetPool" - ], - "request": { - "$ref": "TargetPoolsAddHealthCheckRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "addInstance": { - "id": "compute.targetPools.addInstance", - "path": "{project}/regions/{region}/targetPools/{targetPool}/addInstance", - "httpMethod": "POST", - "description": "Adds an instance to a target pool.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "targetPool": { - "type": "string", - "description": "Name of the TargetPool resource to add instances to.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "targetPool" - ], - "request": { - "$ref": "TargetPoolsAddInstanceRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "aggregatedList": { - "id": "compute.targetPools.aggregatedList", - "path": "{project}/aggregated/targetPools", - "httpMethod": "GET", - "description": "Retrieves an aggregated list of target pools.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "TargetPoolAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "delete": { - "id": "compute.targetPools.delete", - "path": "{project}/regions/{region}/targetPools/{targetPool}", - "httpMethod": "DELETE", - "description": "Deletes the specified target pool.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "targetPool": { - "type": "string", - "description": "Name of the TargetPool resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "targetPool" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.targetPools.get", - "path": 
"{project}/regions/{region}/targetPools/{targetPool}", - "httpMethod": "GET", - "description": "Returns the specified target pool. Get a list of available target pools by making a list() request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "targetPool": { - "type": "string", - "description": "Name of the TargetPool resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "targetPool" - ], - "response": { - "$ref": "TargetPool" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "getHealth": { - "id": "compute.targetPools.getHealth", - "path": "{project}/regions/{region}/targetPools/{targetPool}/getHealth", - "httpMethod": "POST", - "description": "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "targetPool": { - "type": "string", - "description": "Name of the TargetPool resource to which the queried instance belongs.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "targetPool" - ], - "request": { - "$ref": "InstanceReference" - }, - "response": { - "$ref": "TargetPoolInstanceHealth" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.targetPools.insert", - "path": "{project}/regions/{region}/targetPools", - "httpMethod": "POST", - "description": "Creates a target pool in the specified project and region using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "request": { - "$ref": "TargetPool" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.targetPools.list", - "path": 
"{project}/regions/{region}/targetPools", - "httpMethod": "GET", - "description": "Retrieves a list of target pools available to the specified project and region.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "response": { - "$ref": "TargetPoolList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "removeHealthCheck": { - "id": "compute.targetPools.removeHealthCheck", - "path": "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", - "httpMethod": "POST", - "description": "Removes health check URL from a target pool.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "targetPool": { - "type": "string", - "description": "Name of the target pool to remove health checks from.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "targetPool" - ], - "request": { - "$ref": "TargetPoolsRemoveHealthCheckRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "removeInstance": { - "id": "compute.targetPools.removeInstance", - "path": "{project}/regions/{region}/targetPools/{targetPool}/removeInstance", - "httpMethod": "POST", - "description": "Removes instance URL from a target pool.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "targetPool": { - "type": "string", - "description": "Name of the TargetPool resource to remove instances from.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "targetPool" - ], - "request": { - "$ref": "TargetPoolsRemoveInstanceRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setBackup": { - "id": "compute.targetPools.setBackup", - "path": "{project}/regions/{region}/targetPools/{targetPool}/setBackup", - "httpMethod": "POST", - "description": "Changes a backup target pool's configurations.", - 
"parameters": { - "failoverRatio": { - "type": "number", - "description": "New failoverRatio value for the target pool.", - "format": "float", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "targetPool": { - "type": "string", - "description": "Name of the TargetPool resource to set a backup pool for.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "targetPool" - ], - "request": { - "$ref": "TargetReference" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "id": "compute.targetPools.testIamPermissions", - "path": "{project}/regions/{region}/targetPools/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "The name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "targetSslProxies": { - "methods": { - "delete": { - "id": "compute.targetSslProxies.delete", - "path": "{project}/global/targetSslProxies/{targetSslProxy}", - "httpMethod": "DELETE", - "description": "Deletes the specified TargetSslProxy resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetSslProxy": { - "type": "string", - "description": "Name of the TargetSslProxy resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "targetSslProxy" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.targetSslProxies.get", - "path": "{project}/global/targetSslProxies/{targetSslProxy}", - "httpMethod": "GET", - "description": "Returns 
the specified TargetSslProxy resource. Get a list of available target SSL proxies by making a list() request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetSslProxy": { - "type": "string", - "description": "Name of the TargetSslProxy resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "targetSslProxy" - ], - "response": { - "$ref": "TargetSslProxy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.targetSslProxies.insert", - "path": "{project}/global/targetSslProxies", - "httpMethod": "POST", - "description": "Creates a TargetSslProxy resource in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "TargetSslProxy" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.targetSslProxies.list", - "path": "{project}/global/targetSslProxies", - "httpMethod": "GET", - "description": "Retrieves the list of TargetSslProxy resources available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "TargetSslProxyList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "setBackendService": { - "id": "compute.targetSslProxies.setBackendService", - "path": "{project}/global/targetSslProxies/{targetSslProxy}/setBackendService", - "httpMethod": "POST", - "description": "Changes the BackendService for TargetSslProxy.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetSslProxy": { - "type": "string", - "description": "Name of the TargetSslProxy resource whose BackendService resource is to be set.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "targetSslProxy" - ], - "request": { - "$ref": "TargetSslProxiesSetBackendServiceRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setProxyHeader": { - "id": "compute.targetSslProxies.setProxyHeader", - "path": "{project}/global/targetSslProxies/{targetSslProxy}/setProxyHeader", - "httpMethod": "POST", - "description": "Changes the ProxyHeaderType for TargetSslProxy.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetSslProxy": { - "type": "string", - "description": "Name of the TargetSslProxy resource whose ProxyHeader is to be set.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - 
"project", - "targetSslProxy" - ], - "request": { - "$ref": "TargetSslProxiesSetProxyHeaderRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setSslCertificates": { - "id": "compute.targetSslProxies.setSslCertificates", - "path": "{project}/global/targetSslProxies/{targetSslProxy}/setSslCertificates", - "httpMethod": "POST", - "description": "Changes SslCertificates for TargetSslProxy.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetSslProxy": { - "type": "string", - "description": "Name of the TargetSslProxy resource whose SslCertificate resource is to be set.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "targetSslProxy" - ], - "request": { - "$ref": "TargetSslProxiesSetSslCertificatesRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "id": "compute.targetSslProxies.testIamPermissions", - "path": "{project}/global/targetSslProxies/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "targetTcpProxies": { - "methods": { - "delete": { - "id": "compute.targetTcpProxies.delete", - "path": "{project}/global/targetTcpProxies/{targetTcpProxy}", - "httpMethod": "DELETE", - "description": "Deletes the specified TargetTcpProxy resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetTcpProxy": { - "type": "string", - "description": "Name of the TargetTcpProxy resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "targetTcpProxy" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.targetTcpProxies.get", - "path": "{project}/global/targetTcpProxies/{targetTcpProxy}", - "httpMethod": "GET", - 
"description": "Returns the specified TargetTcpProxy resource. Get a list of available target TCP proxies by making a list() request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetTcpProxy": { - "type": "string", - "description": "Name of the TargetTcpProxy resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "targetTcpProxy" - ], - "response": { - "$ref": "TargetTcpProxy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.targetTcpProxies.insert", - "path": "{project}/global/targetTcpProxies", - "httpMethod": "POST", - "description": "Creates a TargetTcpProxy resource in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "TargetTcpProxy" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.targetTcpProxies.list", - "path": "{project}/global/targetTcpProxies", - "httpMethod": "GET", - "description": "Retrieves the list of TargetTcpProxy resources available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "TargetTcpProxyList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "setBackendService": { - "id": "compute.targetTcpProxies.setBackendService", - "path": "{project}/global/targetTcpProxies/{targetTcpProxy}/setBackendService", - "httpMethod": "POST", - "description": "Changes the BackendService for TargetTcpProxy.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetTcpProxy": { - "type": "string", - "description": "Name of the TargetTcpProxy resource whose BackendService resource is to be set.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "targetTcpProxy" - ], - "request": { - "$ref": "TargetTcpProxiesSetBackendServiceRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setProxyHeader": { - "id": "compute.targetTcpProxies.setProxyHeader", - "path": "{project}/global/targetTcpProxies/{targetTcpProxy}/setProxyHeader", - "httpMethod": "POST", - "description": "Changes the ProxyHeaderType for TargetTcpProxy.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetTcpProxy": { - "type": "string", - "description": "Name of the TargetTcpProxy resource whose ProxyHeader is to be set.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - 
"project", - "targetTcpProxy" - ], - "request": { - "$ref": "TargetTcpProxiesSetProxyHeaderRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "targetVpnGateways": { - "methods": { - "aggregatedList": { - "id": "compute.targetVpnGateways.aggregatedList", - "path": "{project}/aggregated/targetVpnGateways", - "httpMethod": "GET", - "description": "Retrieves an aggregated list of target VPN gateways.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "TargetVpnGatewayAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "delete": { - "id": "compute.targetVpnGateways.delete", - "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", - "httpMethod": "DELETE", - "description": "Deletes the specified target VPN gateway.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "targetVpnGateway": { - "type": "string", - "description": "Name of the target VPN gateway to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "targetVpnGateway" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.targetVpnGateways.get", - "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", - "httpMethod": "GET", - "description": "Returns the specified target VPN gateway. 
Get a list of available target VPN gateways by making a list() request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "targetVpnGateway": { - "type": "string", - "description": "Name of the target VPN gateway to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "targetVpnGateway" - ], - "response": { - "$ref": "TargetVpnGateway" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.targetVpnGateways.insert", - "path": "{project}/regions/{region}/targetVpnGateways", - "httpMethod": "POST", - "description": "Creates a target VPN gateway in the specified project and region using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "request": { - "$ref": "TargetVpnGateway" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.targetVpnGateways.list", - "path": "{project}/regions/{region}/targetVpnGateways", - "httpMethod": "GET", - "description": "Retrieves a list of target VPN gateways available to the specified project and region.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "response": { - "$ref": "TargetVpnGatewayList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "id": "compute.targetVpnGateways.testIamPermissions", - "path": "{project}/regions/{region}/targetVpnGateways/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "The name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - 
"urlMaps": { - "methods": { - "delete": { - "id": "compute.urlMaps.delete", - "path": "{project}/global/urlMaps/{urlMap}", - "httpMethod": "DELETE", - "description": "Deletes the specified UrlMap resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "urlMap": { - "type": "string", - "description": "Name of the UrlMap resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "urlMap" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.urlMaps.get", - "path": "{project}/global/urlMaps/{urlMap}", - "httpMethod": "GET", - "description": "Returns the specified UrlMap resource. Get a list of available URL maps by making a list() request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "urlMap": { - "type": "string", - "description": "Name of the UrlMap resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "urlMap" - ], - "response": { - "$ref": "UrlMap" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.urlMaps.insert", - "path": "{project}/global/urlMaps", - "httpMethod": "POST", - "description": "Creates a UrlMap resource in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "UrlMap" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "invalidateCache": { - "id": "compute.urlMaps.invalidateCache", - "path": "{project}/global/urlMaps/{urlMap}/invalidateCache", - "httpMethod": "POST", - "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "urlMap": { - "type": "string", - "description": "Name of the UrlMap scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "urlMap" - ], - "request": { - "$ref": "CacheInvalidationRule" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - 
"https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.urlMaps.list", - "path": "{project}/global/urlMaps", - "httpMethod": "GET", - "description": "Retrieves the list of UrlMap resources available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "UrlMapList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "id": "compute.urlMaps.patch", - "path": "{project}/global/urlMaps/{urlMap}", - "httpMethod": "PATCH", - "description": "Patches the specified UrlMap resource with the data included in the request. This method supports patch semantics.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "urlMap": { - "type": "string", - "description": "Name of the UrlMap resource to patch.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "urlMap" - ], - "request": { - "$ref": "UrlMap" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "id": "compute.urlMaps.testIamPermissions", - "path": "{project}/global/urlMaps/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "update": { - "id": "compute.urlMaps.update", - "path": "{project}/global/urlMaps/{urlMap}", - "httpMethod": "PUT", - "description": "Updates the specified UrlMap resource with the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "urlMap": { - "type": "string", - "description": "Name of the UrlMap resource to update.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "urlMap" - ], - "request": { - "$ref": "UrlMap" - }, 
- "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "validate": { - "id": "compute.urlMaps.validate", - "path": "{project}/global/urlMaps/{urlMap}/validate", - "httpMethod": "POST", - "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "urlMap": { - "type": "string", - "description": "Name of the UrlMap resource to be validated as.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "urlMap" - ], - "request": { - "$ref": "UrlMapsValidateRequest" - }, - "response": { - "$ref": "UrlMapsValidateResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "vpnTunnels": { - "methods": { - "aggregatedList": { - "id": "compute.vpnTunnels.aggregatedList", - "path": "{project}/aggregated/vpnTunnels", - "httpMethod": "GET", - "description": "Retrieves an aggregated list of VPN tunnels.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "VpnTunnelAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "delete": { - "id": "compute.vpnTunnels.delete", - "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}", - "httpMethod": "DELETE", - "description": "Deletes the specified VpnTunnel resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "vpnTunnel": { - "type": "string", - "description": "Name of the VpnTunnel resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "vpnTunnel" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.vpnTunnels.get", - "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}", - "httpMethod": "GET", - "description": "Returns the specified VpnTunnel resource. 
Get a list of available VPN tunnels by making a list() request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "vpnTunnel": { - "type": "string", - "description": "Name of the VpnTunnel resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "vpnTunnel" - ], - "response": { - "$ref": "VpnTunnel" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.vpnTunnels.insert", - "path": "{project}/regions/{region}/vpnTunnels", - "httpMethod": "POST", - "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "request": { - "$ref": "VpnTunnel" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.vpnTunnels.list", - "path": "{project}/regions/{region}/vpnTunnels", - "httpMethod": "GET", - "description": "Retrieves a list of VpnTunnel resources contained in the specified project and region.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "response": { - "$ref": "VpnTunnelList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "id": "compute.vpnTunnels.testIamPermissions", - "path": "{project}/regions/{region}/vpnTunnels/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "The name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "zoneOperations": { - "methods": { - "delete": { - "id": "compute.zoneOperations.delete", - "path": 
"{project}/zones/{zone}/operations/{operation}", - "httpMethod": "DELETE", - "description": "Deletes the specified zone-specific Operations resource.", - "parameters": { - "operation": { - "type": "string", - "description": "Name of the Operations resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "Name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "operation" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.zoneOperations.get", - "path": "{project}/zones/{zone}/operations/{operation}", - "httpMethod": "GET", - "description": "Retrieves the specified zone-specific Operations resource.", - "parameters": { - "operation": { - "type": "string", - "description": "Name of the Operations resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "Name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "operation" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "list": { - "id": "compute.zoneOperations.list", - "path": "{project}/zones/{zone}/operations", - "httpMethod": "GET", - "description": "Retrieves a list of Operation resources contained within the specified zone.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "Name of the zone for request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone" - ], - "response": { - "$ref": "OperationList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "zones": { - "methods": { - "get": { - "id": "compute.zones.get", - "path": "{project}/zones/{zone}", - "httpMethod": "GET", - "description": "Returns the specified Zone resource. 
Get a list of available zones by making a list() request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "zone": { - "type": "string", - "description": "Name of the zone resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone" - ], - "response": { - "$ref": "Zone" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "list": { - "id": "compute.zones.list", - "path": "{project}/zones", - "httpMethod": "GET", - "description": "Retrieves the list of Zone resources available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "ZoneList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - } - } -} diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go deleted file mode 100644 index 6e9fa30c99a..00000000000 --- a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go +++ /dev/null @@ -1,73513 +0,0 @@ -// Package compute provides access to the Compute Engine API. -// -// See https://developers.google.com/compute/docs/reference/latest/ -// -// Usage example: -// -// import "google.golang.org/api/compute/v0.beta" -// ... -// computeService, err := compute.New(oauthHttpClient) -package compute // import "google.golang.org/api/compute/v0.beta" - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - context "golang.org/x/net/context" - ctxhttp "golang.org/x/net/context/ctxhttp" - gensupport "google.golang.org/api/gensupport" - googleapi "google.golang.org/api/googleapi" - "io" - "net/http" - "net/url" - "strconv" - "strings" -) - -// Always reference these packages, just in case the auto-generated code -// below doesn't. -var _ = bytes.NewBuffer -var _ = strconv.Itoa -var _ = fmt.Sprintf -var _ = json.NewDecoder -var _ = io.Copy -var _ = url.Parse -var _ = gensupport.MarshalJSON -var _ = googleapi.Version -var _ = errors.New -var _ = strings.Replace -var _ = context.Canceled -var _ = ctxhttp.Do - -const apiId = "compute:beta" -const apiName = "compute" -const apiVersion = "beta" -const basePath = "https://www.googleapis.com/compute/beta/projects/" - -// OAuth2 scopes used by this API. 
-const ( - // View and manage your data across Google Cloud Platform services - CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" - - // View and manage your Google Compute Engine resources - ComputeScope = "https://www.googleapis.com/auth/compute" - - // View your Google Compute Engine resources - ComputeReadonlyScope = "https://www.googleapis.com/auth/compute.readonly" - - // Manage your data and permissions in Google Cloud Storage - DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control" - - // View your data in Google Cloud Storage - DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only" - - // Manage your data in Google Cloud Storage - DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write" -) - -func New(client *http.Client) (*Service, error) { - if client == nil { - return nil, errors.New("client is nil") - } - s := &Service{client: client, BasePath: basePath} - s.AcceleratorTypes = NewAcceleratorTypesService(s) - s.Addresses = NewAddressesService(s) - s.Autoscalers = NewAutoscalersService(s) - s.BackendBuckets = NewBackendBucketsService(s) - s.BackendServices = NewBackendServicesService(s) - s.DiskTypes = NewDiskTypesService(s) - s.Disks = NewDisksService(s) - s.Firewalls = NewFirewallsService(s) - s.ForwardingRules = NewForwardingRulesService(s) - s.GlobalAddresses = NewGlobalAddressesService(s) - s.GlobalForwardingRules = NewGlobalForwardingRulesService(s) - s.GlobalOperations = NewGlobalOperationsService(s) - s.HealthChecks = NewHealthChecksService(s) - s.HttpHealthChecks = NewHttpHealthChecksService(s) - s.HttpsHealthChecks = NewHttpsHealthChecksService(s) - s.Images = NewImagesService(s) - s.InstanceGroupManagers = NewInstanceGroupManagersService(s) - s.InstanceGroups = NewInstanceGroupsService(s) - s.InstanceTemplates = NewInstanceTemplatesService(s) - s.Instances = NewInstancesService(s) - s.Licenses = NewLicensesService(s) - s.MachineTypes = NewMachineTypesService(s) - s.Networks = NewNetworksService(s) - s.Projects = NewProjectsService(s) - s.RegionAutoscalers = NewRegionAutoscalersService(s) - s.RegionBackendServices = NewRegionBackendServicesService(s) - s.RegionCommitments = NewRegionCommitmentsService(s) - s.RegionInstanceGroupManagers = NewRegionInstanceGroupManagersService(s) - s.RegionInstanceGroups = NewRegionInstanceGroupsService(s) - s.RegionOperations = NewRegionOperationsService(s) - s.Regions = NewRegionsService(s) - s.Routers = NewRoutersService(s) - s.Routes = NewRoutesService(s) - s.Snapshots = NewSnapshotsService(s) - s.SslCertificates = NewSslCertificatesService(s) - s.Subnetworks = NewSubnetworksService(s) - s.TargetHttpProxies = NewTargetHttpProxiesService(s) - s.TargetHttpsProxies = NewTargetHttpsProxiesService(s) - s.TargetInstances = NewTargetInstancesService(s) - s.TargetPools = NewTargetPoolsService(s) - s.TargetSslProxies = NewTargetSslProxiesService(s) - s.TargetTcpProxies = NewTargetTcpProxiesService(s) - s.TargetVpnGateways = NewTargetVpnGatewaysService(s) - s.UrlMaps = NewUrlMapsService(s) - s.VpnTunnels = NewVpnTunnelsService(s) - s.ZoneOperations = NewZoneOperationsService(s) - s.Zones = NewZonesService(s) - return s, nil -} - -type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment - - AcceleratorTypes *AcceleratorTypesService - - Addresses *AddressesService - - Autoscalers *AutoscalersService - - BackendBuckets *BackendBucketsService - - 
BackendServices *BackendServicesService - - DiskTypes *DiskTypesService - - Disks *DisksService - - Firewalls *FirewallsService - - ForwardingRules *ForwardingRulesService - - GlobalAddresses *GlobalAddressesService - - GlobalForwardingRules *GlobalForwardingRulesService - - GlobalOperations *GlobalOperationsService - - HealthChecks *HealthChecksService - - HttpHealthChecks *HttpHealthChecksService - - HttpsHealthChecks *HttpsHealthChecksService - - Images *ImagesService - - InstanceGroupManagers *InstanceGroupManagersService - - InstanceGroups *InstanceGroupsService - - InstanceTemplates *InstanceTemplatesService - - Instances *InstancesService - - Licenses *LicensesService - - MachineTypes *MachineTypesService - - Networks *NetworksService - - Projects *ProjectsService - - RegionAutoscalers *RegionAutoscalersService - - RegionBackendServices *RegionBackendServicesService - - RegionCommitments *RegionCommitmentsService - - RegionInstanceGroupManagers *RegionInstanceGroupManagersService - - RegionInstanceGroups *RegionInstanceGroupsService - - RegionOperations *RegionOperationsService - - Regions *RegionsService - - Routers *RoutersService - - Routes *RoutesService - - Snapshots *SnapshotsService - - SslCertificates *SslCertificatesService - - Subnetworks *SubnetworksService - - TargetHttpProxies *TargetHttpProxiesService - - TargetHttpsProxies *TargetHttpsProxiesService - - TargetInstances *TargetInstancesService - - TargetPools *TargetPoolsService - - TargetSslProxies *TargetSslProxiesService - - TargetTcpProxies *TargetTcpProxiesService - - TargetVpnGateways *TargetVpnGatewaysService - - UrlMaps *UrlMapsService - - VpnTunnels *VpnTunnelsService - - ZoneOperations *ZoneOperationsService - - Zones *ZonesService -} - -func (s *Service) userAgent() string { - if s.UserAgent == "" { - return googleapi.UserAgent - } - return googleapi.UserAgent + " " + s.UserAgent -} - -func NewAcceleratorTypesService(s *Service) *AcceleratorTypesService { - rs := &AcceleratorTypesService{s: s} - return rs -} - -type AcceleratorTypesService struct { - s *Service -} - -func NewAddressesService(s *Service) *AddressesService { - rs := &AddressesService{s: s} - return rs -} - -type AddressesService struct { - s *Service -} - -func NewAutoscalersService(s *Service) *AutoscalersService { - rs := &AutoscalersService{s: s} - return rs -} - -type AutoscalersService struct { - s *Service -} - -func NewBackendBucketsService(s *Service) *BackendBucketsService { - rs := &BackendBucketsService{s: s} - return rs -} - -type BackendBucketsService struct { - s *Service -} - -func NewBackendServicesService(s *Service) *BackendServicesService { - rs := &BackendServicesService{s: s} - return rs -} - -type BackendServicesService struct { - s *Service -} - -func NewDiskTypesService(s *Service) *DiskTypesService { - rs := &DiskTypesService{s: s} - return rs -} - -type DiskTypesService struct { - s *Service -} - -func NewDisksService(s *Service) *DisksService { - rs := &DisksService{s: s} - return rs -} - -type DisksService struct { - s *Service -} - -func NewFirewallsService(s *Service) *FirewallsService { - rs := &FirewallsService{s: s} - return rs -} - -type FirewallsService struct { - s *Service -} - -func NewForwardingRulesService(s *Service) *ForwardingRulesService { - rs := &ForwardingRulesService{s: s} - return rs -} - -type ForwardingRulesService struct { - s *Service -} - -func NewGlobalAddressesService(s *Service) *GlobalAddressesService { - rs := &GlobalAddressesService{s: s} - return rs -} - -type GlobalAddressesService 
struct { - s *Service -} - -func NewGlobalForwardingRulesService(s *Service) *GlobalForwardingRulesService { - rs := &GlobalForwardingRulesService{s: s} - return rs -} - -type GlobalForwardingRulesService struct { - s *Service -} - -func NewGlobalOperationsService(s *Service) *GlobalOperationsService { - rs := &GlobalOperationsService{s: s} - return rs -} - -type GlobalOperationsService struct { - s *Service -} - -func NewHealthChecksService(s *Service) *HealthChecksService { - rs := &HealthChecksService{s: s} - return rs -} - -type HealthChecksService struct { - s *Service -} - -func NewHttpHealthChecksService(s *Service) *HttpHealthChecksService { - rs := &HttpHealthChecksService{s: s} - return rs -} - -type HttpHealthChecksService struct { - s *Service -} - -func NewHttpsHealthChecksService(s *Service) *HttpsHealthChecksService { - rs := &HttpsHealthChecksService{s: s} - return rs -} - -type HttpsHealthChecksService struct { - s *Service -} - -func NewImagesService(s *Service) *ImagesService { - rs := &ImagesService{s: s} - return rs -} - -type ImagesService struct { - s *Service -} - -func NewInstanceGroupManagersService(s *Service) *InstanceGroupManagersService { - rs := &InstanceGroupManagersService{s: s} - return rs -} - -type InstanceGroupManagersService struct { - s *Service -} - -func NewInstanceGroupsService(s *Service) *InstanceGroupsService { - rs := &InstanceGroupsService{s: s} - return rs -} - -type InstanceGroupsService struct { - s *Service -} - -func NewInstanceTemplatesService(s *Service) *InstanceTemplatesService { - rs := &InstanceTemplatesService{s: s} - return rs -} - -type InstanceTemplatesService struct { - s *Service -} - -func NewInstancesService(s *Service) *InstancesService { - rs := &InstancesService{s: s} - return rs -} - -type InstancesService struct { - s *Service -} - -func NewLicensesService(s *Service) *LicensesService { - rs := &LicensesService{s: s} - return rs -} - -type LicensesService struct { - s *Service -} - -func NewMachineTypesService(s *Service) *MachineTypesService { - rs := &MachineTypesService{s: s} - return rs -} - -type MachineTypesService struct { - s *Service -} - -func NewNetworksService(s *Service) *NetworksService { - rs := &NetworksService{s: s} - return rs -} - -type NetworksService struct { - s *Service -} - -func NewProjectsService(s *Service) *ProjectsService { - rs := &ProjectsService{s: s} - return rs -} - -type ProjectsService struct { - s *Service -} - -func NewRegionAutoscalersService(s *Service) *RegionAutoscalersService { - rs := &RegionAutoscalersService{s: s} - return rs -} - -type RegionAutoscalersService struct { - s *Service -} - -func NewRegionBackendServicesService(s *Service) *RegionBackendServicesService { - rs := &RegionBackendServicesService{s: s} - return rs -} - -type RegionBackendServicesService struct { - s *Service -} - -func NewRegionCommitmentsService(s *Service) *RegionCommitmentsService { - rs := &RegionCommitmentsService{s: s} - return rs -} - -type RegionCommitmentsService struct { - s *Service -} - -func NewRegionInstanceGroupManagersService(s *Service) *RegionInstanceGroupManagersService { - rs := &RegionInstanceGroupManagersService{s: s} - return rs -} - -type RegionInstanceGroupManagersService struct { - s *Service -} - -func NewRegionInstanceGroupsService(s *Service) *RegionInstanceGroupsService { - rs := &RegionInstanceGroupsService{s: s} - return rs -} - -type RegionInstanceGroupsService struct { - s *Service -} - -func NewRegionOperationsService(s *Service) *RegionOperationsService { - rs := 
&RegionOperationsService{s: s} - return rs -} - -type RegionOperationsService struct { - s *Service -} - -func NewRegionsService(s *Service) *RegionsService { - rs := &RegionsService{s: s} - return rs -} - -type RegionsService struct { - s *Service -} - -func NewRoutersService(s *Service) *RoutersService { - rs := &RoutersService{s: s} - return rs -} - -type RoutersService struct { - s *Service -} - -func NewRoutesService(s *Service) *RoutesService { - rs := &RoutesService{s: s} - return rs -} - -type RoutesService struct { - s *Service -} - -func NewSnapshotsService(s *Service) *SnapshotsService { - rs := &SnapshotsService{s: s} - return rs -} - -type SnapshotsService struct { - s *Service -} - -func NewSslCertificatesService(s *Service) *SslCertificatesService { - rs := &SslCertificatesService{s: s} - return rs -} - -type SslCertificatesService struct { - s *Service -} - -func NewSubnetworksService(s *Service) *SubnetworksService { - rs := &SubnetworksService{s: s} - return rs -} - -type SubnetworksService struct { - s *Service -} - -func NewTargetHttpProxiesService(s *Service) *TargetHttpProxiesService { - rs := &TargetHttpProxiesService{s: s} - return rs -} - -type TargetHttpProxiesService struct { - s *Service -} - -func NewTargetHttpsProxiesService(s *Service) *TargetHttpsProxiesService { - rs := &TargetHttpsProxiesService{s: s} - return rs -} - -type TargetHttpsProxiesService struct { - s *Service -} - -func NewTargetInstancesService(s *Service) *TargetInstancesService { - rs := &TargetInstancesService{s: s} - return rs -} - -type TargetInstancesService struct { - s *Service -} - -func NewTargetPoolsService(s *Service) *TargetPoolsService { - rs := &TargetPoolsService{s: s} - return rs -} - -type TargetPoolsService struct { - s *Service -} - -func NewTargetSslProxiesService(s *Service) *TargetSslProxiesService { - rs := &TargetSslProxiesService{s: s} - return rs -} - -type TargetSslProxiesService struct { - s *Service -} - -func NewTargetTcpProxiesService(s *Service) *TargetTcpProxiesService { - rs := &TargetTcpProxiesService{s: s} - return rs -} - -type TargetTcpProxiesService struct { - s *Service -} - -func NewTargetVpnGatewaysService(s *Service) *TargetVpnGatewaysService { - rs := &TargetVpnGatewaysService{s: s} - return rs -} - -type TargetVpnGatewaysService struct { - s *Service -} - -func NewUrlMapsService(s *Service) *UrlMapsService { - rs := &UrlMapsService{s: s} - return rs -} - -type UrlMapsService struct { - s *Service -} - -func NewVpnTunnelsService(s *Service) *VpnTunnelsService { - rs := &VpnTunnelsService{s: s} - return rs -} - -type VpnTunnelsService struct { - s *Service -} - -func NewZoneOperationsService(s *Service) *ZoneOperationsService { - rs := &ZoneOperationsService{s: s} - return rs -} - -type ZoneOperationsService struct { - s *Service -} - -func NewZonesService(s *Service) *ZonesService { - rs := &ZonesService{s: s} - return rs -} - -type ZonesService struct { - s *Service -} - -// AcceleratorConfig: A specification of the type and number of -// accelerator cards attached to the instance. -type AcceleratorConfig struct { - // AcceleratorCount: The number of the guest accelerator cards exposed - // to this instance. - AcceleratorCount int64 `json:"acceleratorCount,omitempty"` - - // AcceleratorType: Full or partial URL of the accelerator type resource - // to expose to this instance. - AcceleratorType string `json:"acceleratorType,omitempty"` - - // ForceSendFields is a list of field names (e.g. 
"AcceleratorCount") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AcceleratorCount") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *AcceleratorConfig) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorConfig - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AcceleratorType: An Accelerator Type resource. -type AcceleratorType struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Deprecated: [Output Only] The deprecation status associated with this - // accelerator type. - Deprecated *DeprecationStatus `json:"deprecated,omitempty"` - - // Description: [Output Only] An optional textual description of the - // resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] The type of the resource. Always - // compute#acceleratorType for accelerator types. - Kind string `json:"kind,omitempty"` - - // MaximumCardsPerInstance: [Output Only] Maximum accelerator cards - // allowed per instance. - MaximumCardsPerInstance int64 `json:"maximumCardsPerInstance,omitempty"` - - // Name: [Output Only] Name of the resource. - Name string `json:"name,omitempty"` - - // SelfLink: [Output Only] Server-defined fully-qualified URL for this - // resource. - SelfLink string `json:"selfLink,omitempty"` - - // Zone: [Output Only] The name of the zone where the accelerator type - // resides, such as us-central1-a. - Zone string `json:"zone,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. 
- NullFields []string `json:"-"` -} - -func (s *AcceleratorType) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorType - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AcceleratorTypeAggregatedList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A map of scoped accelerator type lists. - Items map[string]AcceleratorTypesScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#acceleratorTypeAggregatedList for aggregated lists of - // accelerator types. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AcceleratorTypeAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypeAggregatedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AcceleratorTypeList: Contains a list of accelerator types. -type AcceleratorTypeList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of AcceleratorType resources. - Items []*AcceleratorType `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#acceleratorTypeList for lists of accelerator types. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] A token used to continue a truncated - // list request. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. 
By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AcceleratorTypeList) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypeList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AcceleratorTypesScopedList struct { - // AcceleratorTypes: [Output Only] List of accelerator types contained - // in this scope. - AcceleratorTypes []*AcceleratorType `json:"acceleratorTypes,omitempty"` - - // Warning: [Output Only] An informational warning that appears when the - // accelerator types list is empty. - Warning *AcceleratorTypesScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AcceleratorTypes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AcceleratorTypes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *AcceleratorTypesScopedList) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypesScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AcceleratorTypesScopedListWarning: [Output Only] An informational -// warning that appears when the accelerator types list is empty. -type AcceleratorTypesScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. 
- // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*AcceleratorTypesScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AcceleratorTypesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypesScopedListWarning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AcceleratorTypesScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AcceleratorTypesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AcceleratorTypesScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AccessConfig: An access configuration attached to an instance's -// network interface. Only one access config per instance is supported. -type AccessConfig struct { - // Kind: [Output Only] Type of the resource. Always compute#accessConfig - // for access configs. - Kind string `json:"kind,omitempty"` - - // Name: The name of this access configuration. The default and - // recommended name is External NAT but you can use any arbitrary string - // you would like. For example, My external IP or Network Access. - Name string `json:"name,omitempty"` - - // NatIP: An external IP address associated with this instance. Specify - // an unused static external IP address available to the project or - // leave this field undefined to use an IP from a shared ephemeral IP - // address pool. If you specify a static external IP address, it must - // live in the same region as the zone of the instance. - NatIP string `json:"natIP,omitempty"` - - // Type: The type of configuration. The default and only option is - // ONE_TO_ONE_NAT. - // - // Possible values: - // "ONE_TO_ONE_NAT" (default) - Type string `json:"type,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AccessConfig) MarshalJSON() ([]byte, error) { - type noMethod AccessConfig - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Address: A reserved address resource. -type Address struct { - // Address: The static external IP address represented by this resource. - Address string `json:"address,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // IpVersion: The IP Version that will be used by this address. Valid - // options are IPV4 or IPV6. This can only be specified for a global - // address. 
- // - // Possible values: - // "IPV4" - // "IPV6" - // "UNSPECIFIED_VERSION" - IpVersion string `json:"ipVersion,omitempty"` - - // Kind: [Output Only] Type of the resource. Always compute#address for - // addresses. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Region: [Output Only] URL of the region where the regional address - // resides. This field is not applicable to global addresses. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Status: [Output Only] The status of the address, which can be either - // IN_USE or RESERVED. An address that is RESERVED is currently reserved - // and available to use. An IN_USE address is currently being used by - // another resource and is not available. - // - // Possible values: - // "IN_USE" - // "RESERVED" - Status string `json:"status,omitempty"` - - // Users: [Output Only] The URLs of the resources that are using this - // address. - Users []string `json:"users,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Address") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Address") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Address) MarshalJSON() ([]byte, error) { - type noMethod Address - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AddressAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A map of scoped address lists. - Items map[string]AddressesScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#addressAggregatedList for aggregated lists of addresses. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. 
Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AddressAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod AddressAggregatedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AddressList: Contains a list of addresses. -type AddressList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of addresses. - Items []*Address `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#addressList for - // lists of addresses. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *AddressList) MarshalJSON() ([]byte, error) { - type noMethod AddressList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AddressesScopedList struct { - // Addresses: [Output Only] List of addresses contained in this scope. - Addresses []*Address `json:"addresses,omitempty"` - - // Warning: [Output Only] Informational warning which replaces the list - // of addresses when the list is empty. - Warning *AddressesScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Addresses") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Addresses") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AddressesScopedList) MarshalJSON() ([]byte, error) { - type noMethod AddressesScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AddressesScopedListWarning: [Output Only] Informational warning which -// replaces the list of addresses when the list is empty. -type AddressesScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*AddressesScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. 
By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AddressesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod AddressesScopedListWarning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AddressesScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AddressesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AddressesScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AliasIpRange: An alias IP range attached to an instance's network -// interface. -type AliasIpRange struct { - // IpCidrRange: The IP CIDR range represented by this alias IP range. - // This IP CIDR range must belong to the specified subnetwork and cannot - // contain IP addresses reserved by system or used by other network - // interfaces. This range may be a single IP address (e.g. 10.2.3.4), a - // netmask (e.g. /24) or a CIDR format string (e.g. 10.1.2.0/24). - IpCidrRange string `json:"ipCidrRange,omitempty"` - - // SubnetworkRangeName: Optional subnetwork secondary range name - // specifying the secondary range from which to allocate the IP CIDR - // range for this alias IP range. If left unspecified, the primary range - // of the subnetwork will be used. - SubnetworkRangeName string `json:"subnetworkRangeName,omitempty"` - - // ForceSendFields is a list of field names (e.g. "IpCidrRange") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "IpCidrRange") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AliasIpRange) MarshalJSON() ([]byte, error) { - type noMethod AliasIpRange - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AttachedDisk: An instance-attached disk resource. -type AttachedDisk struct { - // AutoDelete: Specifies whether the disk will be auto-deleted when the - // instance is deleted (but not when the disk is detached from the - // instance). - AutoDelete bool `json:"autoDelete,omitempty"` - - // Boot: Indicates that this is a boot disk. The virtual machine will - // use the first partition of the disk for its root filesystem. - Boot bool `json:"boot,omitempty"` - - // DeviceName: Specifies a unique device name of your choice that is - // reflected into the /dev/disk/by-id/google-* tree of a Linux operating - // system running within the instance. This name can be used to - // reference the device for mounting, resizing, and so on, from within - // the instance. - // - // If not specified, the server chooses a default device name to apply - // to this disk, in the form persistent-disks-x, where x is a number - // assigned by Google Compute Engine. This field is only applicable for - // persistent disks. - DeviceName string `json:"deviceName,omitempty"` - - // DiskEncryptionKey: Encrypts or decrypts a disk using a - // customer-supplied encryption key. - // - // If you are creating a new disk, this field encrypts the new disk - // using an encryption key that you provide. If you are attaching an - // existing disk that is already encrypted, this field decrypts the disk - // using the customer-supplied encryption key. - // - // If you encrypt a disk using a customer-supplied key, you must provide - // the same key again when you attempt to use this resource at a later - // time. For example, you must provide the key when you create a - // snapshot or an image from the disk or when you attach the disk to a - // virtual machine instance. - // - // If you do not provide an encryption key, then the disk will be - // encrypted using an automatically generated key and you do not need to - // provide a key to use the disk later. - // - // Instance templates do not store customer-supplied encryption keys, so - // you cannot use your own keys to encrypt disks in a managed instance - // group. - DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` - - // Index: Assigns a zero-based index to this disk, where 0 is reserved - // for the boot disk. For example, if you have many disks attached to an - // instance, each disk would have a unique index number. If not - // specified, the server will choose an appropriate value. 
- Index int64 `json:"index,omitempty"` - - // InitializeParams: [Input Only] Specifies the parameters for a new - // disk that will be created alongside the new instance. Use - // initialization parameters to create boot disks or local SSDs attached - // to the new instance. - // - // This property is mutually exclusive with the source property; you can - // only define one or the other, but not both. - InitializeParams *AttachedDiskInitializeParams `json:"initializeParams,omitempty"` - - // Interface: Specifies the disk interface to use for attaching this - // disk, which is either SCSI or NVME. The default is SCSI. Persistent - // disks must always use SCSI and the request will fail if you attempt - // to attach a persistent disk in any other format than SCSI. Local SSDs - // can use either NVME or SCSI. For performance characteristics of SCSI - // over NVMe, see Local SSD performance. - // - // Possible values: - // "NVME" - // "SCSI" - Interface string `json:"interface,omitempty"` - - // Kind: [Output Only] Type of the resource. Always compute#attachedDisk - // for attached disks. - Kind string `json:"kind,omitempty"` - - // Licenses: [Output Only] Any valid publicly visible licenses. - Licenses []string `json:"licenses,omitempty"` - - // Mode: The mode in which to attach this disk, either READ_WRITE or - // READ_ONLY. If not specified, the default is to attach the disk in - // READ_WRITE mode. - // - // Possible values: - // "READ_ONLY" - // "READ_WRITE" - Mode string `json:"mode,omitempty"` - - // Source: Specifies a valid partial or full URL to an existing - // Persistent Disk resource. When creating a new instance, one of - // initializeParams.sourceImage or disks.source is required. - // - // If desired, you can also attach existing non-root persistent disks - // using this property. This field is only applicable for persistent - // disks. - // - // Note that for InstanceTemplate, specify the disk name, not the URL - // for the disk. - Source string `json:"source,omitempty"` - - // Type: Specifies the type of the disk, either SCRATCH or PERSISTENT. - // If not specified, the default is PERSISTENT. - // - // Possible values: - // "PERSISTENT" - // "SCRATCH" - Type string `json:"type,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AutoDelete") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AutoDelete") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AttachedDisk) MarshalJSON() ([]byte, error) { - type noMethod AttachedDisk - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AttachedDiskInitializeParams: [Input Only] Specifies the parameters -// for a new disk that will be created alongside the new instance. 
Use -// initialization parameters to create boot disks or local SSDs attached -// to the new instance. -// -// This property is mutually exclusive with the source property; you can -// only define one or the other, but not both. -type AttachedDiskInitializeParams struct { - // DiskName: Specifies the disk name. If not specified, the default is - // to use the name of the instance. - DiskName string `json:"diskName,omitempty"` - - // DiskSizeGb: Specifies the size of the disk in base-2 GB. - DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` - - // DiskStorageType: [Deprecated] Storage type of the disk. - // - // Possible values: - // "HDD" - // "SSD" - DiskStorageType string `json:"diskStorageType,omitempty"` - - // DiskType: Specifies the disk type to use to create the instance. If - // not specified, the default is pd-standard, specified using the full - // URL. For - // example: - // - // https://www.googleapis.com/compute/v1/projects/project/zones - // /zone/diskTypes/pd-standard - // - // Other values include pd-ssd and local-ssd. If you define this field, - // you can provide either the full or partial URL. For example, the - // following are valid values: - // - - // https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/diskType - // - projects/project/zones/zone/diskTypes/diskType - // - zones/zone/diskTypes/diskType Note that for InstanceTemplate, this - // is the name of the disk type, not URL. - DiskType string `json:"diskType,omitempty"` - - // SourceImage: The source image to create this disk. When creating a - // new instance, one of initializeParams.sourceImage or disks.source is - // required. - // - // To create a disk with one of the public operating system images, - // specify the image by its family name. For example, specify - // family/debian-8 to use the latest Debian 8 - // image: - // - // projects/debian-cloud/global/images/family/debian-8 - // - // Alternatively, use a specific version of a public operating system - // image: - // - // projects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD - // - // To create a disk with a private image that you created, specify the - // image name in the following format: - // - // global/images/my-private-image - // - // You can also specify a private image by its image family, which - // returns the latest version of the image in that family. Replace the - // image name with - // family/family-name: - // - // global/images/family/my-private-family - // - // If the source image is deleted later, this field will not be set. - SourceImage string `json:"sourceImage,omitempty"` - - // SourceImageEncryptionKey: The customer-supplied encryption key of the - // source image. Required if the source image is protected by a - // customer-supplied encryption key. - // - // Instance templates do not store customer-supplied encryption keys, so - // you cannot create disks for instances in a managed instance group if - // the source images are encrypted with your own keys. - SourceImageEncryptionKey *CustomerEncryptionKey `json:"sourceImageEncryptionKey,omitempty"` - - // ForceSendFields is a list of field names (e.g. "DiskName") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
- ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DiskName") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AttachedDiskInitializeParams) MarshalJSON() ([]byte, error) { - type noMethod AttachedDiskInitializeParams - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AuditConfig: Specifies the audit configuration for a service. The -// configuration determines which permission types are logged, and what -// identities, if any, are exempted from logging. An AuditConfig must -// have one or more AuditLogConfigs. -// -// If there are AuditConfigs for both `allServices` and a specific -// service, the union of the two AuditConfigs is used for that service: -// the log_types specified in each AuditConfig are enabled, and the -// exempted_members in each AuditConfig are exempted. -// -// Example Policy with multiple AuditConfigs: -// -// { "audit_configs": [ { "service": "allServices" "audit_log_configs": -// [ { "log_type": "DATA_READ", "exempted_members": [ -// "user:foo@gmail.com" ] }, { "log_type": "DATA_WRITE", }, { -// "log_type": "ADMIN_READ", } ] }, { "service": -// "fooservice.googleapis.com" "audit_log_configs": [ { "log_type": -// "DATA_READ", }, { "log_type": "DATA_WRITE", "exempted_members": [ -// "user:bar@gmail.com" ] } ] } ] } -// -// For fooservice, this policy enables DATA_READ, DATA_WRITE and -// ADMIN_READ logging. It also exempts foo@gmail.com from DATA_READ -// logging, and bar@gmail.com from DATA_WRITE logging. -type AuditConfig struct { - // AuditLogConfigs: The configuration for logging of each type of - // permission. - AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` - - ExemptedMembers []string `json:"exemptedMembers,omitempty"` - - // Service: Specifies a service that will be enabled for audit logging. - // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. - // `allServices` is a special value that covers all services. - Service string `json:"service,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AuditLogConfigs") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AuditLogConfigs") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. 
- NullFields []string `json:"-"` -} - -func (s *AuditConfig) MarshalJSON() ([]byte, error) { - type noMethod AuditConfig - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AuditLogConfig: Provides the configuration for logging a type of -// permissions. Example: -// -// { "audit_log_configs": [ { "log_type": "DATA_READ", -// "exempted_members": [ "user:foo@gmail.com" ] }, { "log_type": -// "DATA_WRITE", } ] } -// -// This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting -// foo@gmail.com from DATA_READ logging. -type AuditLogConfig struct { - // ExemptedMembers: Specifies the identities that do not cause logging - // for this type of permission. Follows the same format of - // [Binding.members][]. - ExemptedMembers []string `json:"exemptedMembers,omitempty"` - - // LogType: The log type that this config enables. - // - // Possible values: - // "ADMIN_READ" - // "DATA_READ" - // "DATA_WRITE" - // "LOG_TYPE_UNSPECIFIED" - LogType string `json:"logType,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ExemptedMembers") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ExemptedMembers") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { - type noMethod AuditLogConfig - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Autoscaler: Represents an Autoscaler resource. Autoscalers allow you -// to automatically scale virtual machine instances in managed instance -// groups according to an autoscaling policy that you define. For more -// information, read Autoscaling Groups of Instances. -type Autoscaler struct { - // AutoscalingPolicy: The configuration parameters for the autoscaling - // algorithm. You can define one or more of the policies for an - // autoscaler: cpuUtilization, customMetricUtilizations, and - // loadBalancingUtilization. - // - // If none of these are specified, the default will be to autoscale - // based on cpuUtilization to 0.6 or 60%. - AutoscalingPolicy *AutoscalingPolicy `json:"autoscalingPolicy,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#autoscaler - // for autoscalers. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. 
Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Region: [Output Only] URL of the region where the instance group - // resides (for autoscalers living in regional scope). - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Status: [Output Only] The status of the autoscaler configuration. - // - // Possible values: - // "ACTIVE" - // "DELETING" - // "ERROR" - // "PENDING" - Status string `json:"status,omitempty"` - - // StatusDetails: [Output Only] Human-readable details about the current - // state of the autoscaler. Read the documentation for Commonly returned - // status messages for examples of status messages you might encounter. - StatusDetails []*AutoscalerStatusDetails `json:"statusDetails,omitempty"` - - // Target: URL of the managed instance group that this autoscaler will - // scale. - Target string `json:"target,omitempty"` - - // Zone: [Output Only] URL of the zone where the instance group resides - // (for autoscalers living in zonal scope). - Zone string `json:"zone,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "AutoscalingPolicy") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AutoscalingPolicy") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Autoscaler) MarshalJSON() ([]byte, error) { - type noMethod Autoscaler - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AutoscalerAggregatedList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: A map of scoped autoscaler lists. - Items map[string]AutoscalersScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#autoscalerAggregatedList for aggregated lists of autoscalers. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. 
Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AutoscalerAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod AutoscalerAggregatedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AutoscalerList: Contains a list of Autoscaler resources. -type AutoscalerList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: A list of Autoscaler resources. - Items []*Autoscaler `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#autoscalerList - // for lists of autoscalers. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *AutoscalerList) MarshalJSON() ([]byte, error) { - type noMethod AutoscalerList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AutoscalerStatusDetails struct { - // Message: The status message. - Message string `json:"message,omitempty"` - - // Type: The type of error returned. - // - // Possible values: - // "ALL_INSTANCES_UNHEALTHY" - // "BACKEND_SERVICE_DOES_NOT_EXIST" - // "CAPPED_AT_MAX_NUM_REPLICAS" - // "CUSTOM_METRIC_DATA_POINTS_TOO_SPARSE" - // "CUSTOM_METRIC_INVALID" - // "MIN_EQUALS_MAX" - // "MISSING_CUSTOM_METRIC_DATA_POINTS" - // "MISSING_LOAD_BALANCING_DATA_POINTS" - // "MORE_THAN_ONE_BACKEND_SERVICE" - // "NOT_ENOUGH_QUOTA_AVAILABLE" - // "SCALING_TARGET_DOES_NOT_EXIST" - // "UNKNOWN" - // "UNSUPPORTED_MAX_RATE_LOAD_BALANCING_CONFIGURATION" - Type string `json:"type,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Message") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Message") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AutoscalerStatusDetails) MarshalJSON() ([]byte, error) { - type noMethod AutoscalerStatusDetails - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AutoscalersScopedList struct { - // Autoscalers: [Output Only] List of autoscalers contained in this - // scope. - Autoscalers []*Autoscaler `json:"autoscalers,omitempty"` - - // Warning: [Output Only] Informational warning which replaces the list - // of autoscalers when the list is empty. - Warning *AutoscalersScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Autoscalers") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Autoscalers") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *AutoscalersScopedList) MarshalJSON() ([]byte, error) { - type noMethod AutoscalersScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AutoscalersScopedListWarning: [Output Only] Informational warning -// which replaces the list of autoscalers when the list is empty. -type AutoscalersScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*AutoscalersScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AutoscalersScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod AutoscalersScopedListWarning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AutoscalersScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. 
By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AutoscalersScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod AutoscalersScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AutoscalingPolicy: Cloud Autoscaler policy. -type AutoscalingPolicy struct { - // CoolDownPeriodSec: The number of seconds that the autoscaler should - // wait before it starts collecting information from a new instance. - // This prevents the autoscaler from collecting information when the - // instance is initializing, during which the collected usage would not - // be reliable. The default time autoscaler waits is 60 - // seconds. - // - // Virtual machine initialization times might vary because of numerous - // factors. We recommend that you test how long an instance may take to - // initialize. To do this, create an instance and time the startup - // process. - CoolDownPeriodSec int64 `json:"coolDownPeriodSec,omitempty"` - - // CpuUtilization: Defines the CPU utilization policy that allows the - // autoscaler to scale based on the average CPU utilization of a managed - // instance group. - CpuUtilization *AutoscalingPolicyCpuUtilization `json:"cpuUtilization,omitempty"` - - // CustomMetricUtilizations: Configuration parameters of autoscaling - // based on a custom metric. - CustomMetricUtilizations []*AutoscalingPolicyCustomMetricUtilization `json:"customMetricUtilizations,omitempty"` - - // LoadBalancingUtilization: Configuration parameters of autoscaling - // based on load balancer. - LoadBalancingUtilization *AutoscalingPolicyLoadBalancingUtilization `json:"loadBalancingUtilization,omitempty"` - - // MaxNumReplicas: The maximum number of instances that the autoscaler - // can scale up to. This is required when creating or updating an - // autoscaler. The maximum number of replicas should not be lower than - // minimal number of replicas. - MaxNumReplicas int64 `json:"maxNumReplicas,omitempty"` - - // MinNumReplicas: The minimum number of replicas that the autoscaler - // can scale down to. This cannot be less than 0. If not provided, - // autoscaler will choose a default value depending on maximum number of - // instances allowed. - MinNumReplicas int64 `json:"minNumReplicas,omitempty"` - - // ForceSendFields is a list of field names (e.g. "CoolDownPeriodSec") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
- ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CoolDownPeriodSec") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *AutoscalingPolicy) MarshalJSON() ([]byte, error) { - type noMethod AutoscalingPolicy - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AutoscalingPolicyCpuUtilization: CPU utilization policy. -type AutoscalingPolicyCpuUtilization struct { - // UtilizationTarget: The target CPU utilization that the autoscaler - // should maintain. Must be a float value in the range (0, 1]. If not - // specified, the default is 0.6. - // - // If the CPU level is below the target utilization, the autoscaler - // scales down the number of instances until it reaches the minimum - // number of instances you specified or until the average CPU of your - // instances reaches the target utilization. - // - // If the average CPU is above the target utilization, the autoscaler - // scales up until it reaches the maximum number of instances you - // specified or until the average utilization reaches the target - // utilization. - UtilizationTarget float64 `json:"utilizationTarget,omitempty"` - - // ForceSendFields is a list of field names (e.g. "UtilizationTarget") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "UtilizationTarget") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *AutoscalingPolicyCpuUtilization) MarshalJSON() ([]byte, error) { - type noMethod AutoscalingPolicyCpuUtilization - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *AutoscalingPolicyCpuUtilization) UnmarshalJSON(data []byte) error { - type noMethod AutoscalingPolicyCpuUtilization - var s1 struct { - UtilizationTarget gensupport.JSONFloat64 `json:"utilizationTarget"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.UtilizationTarget = float64(s1.UtilizationTarget) - return nil -} - -// AutoscalingPolicyCustomMetricUtilization: Custom utilization metric -// policy. -type AutoscalingPolicyCustomMetricUtilization struct { - // Metric: The identifier of the Stackdriver Monitoring metric. The - // metric cannot have negative values and should be a utilization - // metric, which means that the number of virtual machines handling - // requests should increase or decrease proportionally to the metric. 
- // The metric must also have a label of - // compute.googleapis.com/resource_id with the value of the instance's - // unique ID, although this alone does not guarantee that the metric is - // valid. - // - // For example, the following is a valid - // metric: - // compute.googleapis.com/instance/network/received_bytes_count - // T - // he following is not a valid metric because it does not increase or - // decrease based on - // usage: - // compute.googleapis.com/instance/cpu/reserved_cores - Metric string `json:"metric,omitempty"` - - // UtilizationTarget: Target value of the metric which autoscaler should - // maintain. Must be a positive value. - UtilizationTarget float64 `json:"utilizationTarget,omitempty"` - - // UtilizationTargetType: Defines how target utilization value is - // expressed for a Stackdriver Monitoring metric. Either GAUGE, - // DELTA_PER_SECOND, or DELTA_PER_MINUTE. If not specified, the default - // is GAUGE. - // - // Possible values: - // "DELTA_PER_MINUTE" - // "DELTA_PER_SECOND" - // "GAUGE" - UtilizationTargetType string `json:"utilizationTargetType,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Metric") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Metric") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AutoscalingPolicyCustomMetricUtilization) MarshalJSON() ([]byte, error) { - type noMethod AutoscalingPolicyCustomMetricUtilization - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *AutoscalingPolicyCustomMetricUtilization) UnmarshalJSON(data []byte) error { - type noMethod AutoscalingPolicyCustomMetricUtilization - var s1 struct { - UtilizationTarget gensupport.JSONFloat64 `json:"utilizationTarget"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.UtilizationTarget = float64(s1.UtilizationTarget) - return nil -} - -// AutoscalingPolicyLoadBalancingUtilization: Configuration parameters -// of autoscaling based on load balancing. -type AutoscalingPolicyLoadBalancingUtilization struct { - // UtilizationTarget: Fraction of backend capacity utilization (set in - // HTTP(s) load balancing configuration) that autoscaler should - // maintain. Must be a positive float value. If not defined, the default - // is 0.8. - UtilizationTarget float64 `json:"utilizationTarget,omitempty"` - - // ForceSendFields is a list of field names (e.g. "UtilizationTarget") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "UtilizationTarget") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *AutoscalingPolicyLoadBalancingUtilization) MarshalJSON() ([]byte, error) { - type noMethod AutoscalingPolicyLoadBalancingUtilization - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *AutoscalingPolicyLoadBalancingUtilization) UnmarshalJSON(data []byte) error { - type noMethod AutoscalingPolicyLoadBalancingUtilization - var s1 struct { - UtilizationTarget gensupport.JSONFloat64 `json:"utilizationTarget"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.UtilizationTarget = float64(s1.UtilizationTarget) - return nil -} - -// Backend: Message containing information of one individual backend. -type Backend struct { - // BalancingMode: Specifies the balancing mode for this backend. For - // global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION. - // Valid values are UTILIZATION, RATE (for HTTP(S)) and CONNECTION (for - // TCP/SSL). - // - // This cannot be used for internal load balancing. - // - // Possible values: - // "CONNECTION" - // "RATE" - // "UTILIZATION" - BalancingMode string `json:"balancingMode,omitempty"` - - // CapacityScaler: A multiplier applied to the group's maximum servicing - // capacity (based on UTILIZATION, RATE or CONNECTION). Default value is - // 1, which means the group will serve up to 100% of its configured - // capacity (depending on balancingMode). A setting of 0 means the group - // is completely drained, offering 0% of its available Capacity. Valid - // range is [0.0,1.0]. - // - // This cannot be used for internal load balancing. - CapacityScaler float64 `json:"capacityScaler,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Group: The fully-qualified URL of a zonal Instance Group resource. - // This instance group defines the list of instances that serve traffic. - // Member virtual machine instances from each instance group must live - // in the same zone as the instance group itself. No two backends in a - // backend service are allowed to use same Instance Group - // resource. - // - // Note that you must specify an Instance Group resource using the - // fully-qualified URL, rather than a partial URL. - // - // When the BackendService has load balancing scheme INTERNAL, the - // instance group must be in a zone within the same region as the - // BackendService. - Group string `json:"group,omitempty"` - - // MaxConnections: The max number of simultaneous connections for the - // group. Can be used with either CONNECTION or UTILIZATION balancing - // modes. For CONNECTION mode, either maxConnections or - // maxConnectionsPerInstance must be set. - // - // This cannot be used for internal load balancing. 
- MaxConnections int64 `json:"maxConnections,omitempty"` - - // MaxConnectionsPerInstance: The max number of simultaneous connections - // that a single backend instance can handle. This is used to calculate - // the capacity of the group. Can be used in either CONNECTION or - // UTILIZATION balancing modes. For CONNECTION mode, either - // maxConnections or maxConnectionsPerInstance must be set. - // - // This cannot be used for internal load balancing. - MaxConnectionsPerInstance int64 `json:"maxConnectionsPerInstance,omitempty"` - - // MaxRate: The max requests per second (RPS) of the group. Can be used - // with either RATE or UTILIZATION balancing modes, but required if RATE - // mode. For RATE mode, either maxRate or maxRatePerInstance must be - // set. - // - // This cannot be used for internal load balancing. - MaxRate int64 `json:"maxRate,omitempty"` - - // MaxRatePerInstance: The max requests per second (RPS) that a single - // backend instance can handle. This is used to calculate the capacity - // of the group. Can be used in either balancing mode. For RATE mode, - // either maxRate or maxRatePerInstance must be set. - // - // This cannot be used for internal load balancing. - MaxRatePerInstance float64 `json:"maxRatePerInstance,omitempty"` - - // MaxUtilization: Used when balancingMode is UTILIZATION. This ratio - // defines the CPU utilization target for the group. The default is 0.8. - // Valid range is [0.0, 1.0]. - // - // This cannot be used for internal load balancing. - MaxUtilization float64 `json:"maxUtilization,omitempty"` - - // ForceSendFields is a list of field names (e.g. "BalancingMode") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "BalancingMode") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Backend) MarshalJSON() ([]byte, error) { - type noMethod Backend - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *Backend) UnmarshalJSON(data []byte) error { - type noMethod Backend - var s1 struct { - CapacityScaler gensupport.JSONFloat64 `json:"capacityScaler"` - MaxRatePerInstance gensupport.JSONFloat64 `json:"maxRatePerInstance"` - MaxUtilization gensupport.JSONFloat64 `json:"maxUtilization"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.CapacityScaler = float64(s1.CapacityScaler) - s.MaxRatePerInstance = float64(s1.MaxRatePerInstance) - s.MaxUtilization = float64(s1.MaxUtilization) - return nil -} - -// BackendBucket: A BackendBucket resource. This resource defines a -// Cloud Storage bucket. -type BackendBucket struct { - // BucketName: Cloud Storage bucket name. 
- BucketName string `json:"bucketName,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional textual description of the resource; - // provided by the client when the resource is created. - Description string `json:"description,omitempty"` - - // EnableCdn: If true, enable Cloud CDN for this BackendBucket. - EnableCdn bool `json:"enableCdn,omitempty"` - - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: Type of the resource. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "BucketName") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "BucketName") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BackendBucket) MarshalJSON() ([]byte, error) { - type noMethod BackendBucket - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BackendBucketList: Contains a list of BackendBucket resources. -type BackendBucketList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of BackendBucket resources. - Items []*BackendBucket `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] A token used to continue a truncated - // list request. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BackendBucketList) MarshalJSON() ([]byte, error) { - type noMethod BackendBucketList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BackendService: A BackendService resource. This resource defines a -// group of backend virtual machines and their serving capacity. -type BackendService struct { - // AffinityCookieTtlSec: Lifetime of cookies in seconds if - // session_affinity is GENERATED_COOKIE. If set to 0, the cookie is - // non-persistent and lasts only until the end of the browser session - // (or equivalent). The maximum allowed value for TTL is one day. - // - // When the load balancing scheme is INTERNAL, this field is not used. - AffinityCookieTtlSec int64 `json:"affinityCookieTtlSec,omitempty"` - - // Backends: The list of backends that serve this BackendService. - Backends []*Backend `json:"backends,omitempty"` - - // CdnPolicy: Cloud CDN configuration for this BackendService. - CdnPolicy *BackendServiceCdnPolicy `json:"cdnPolicy,omitempty"` - - ConnectionDraining *ConnectionDraining `json:"connectionDraining,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // EnableCDN: If true, enable Cloud CDN for this BackendService. - // - // When the load balancing scheme is INTERNAL, this field is not used. - EnableCDN bool `json:"enableCDN,omitempty"` - - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a BackendService. An up-to-date - // fingerprint must be provided in order to update the BackendService. - Fingerprint string `json:"fingerprint,omitempty"` - - // HealthChecks: The list of URLs to the HttpHealthCheck or - // HttpsHealthCheck resource for health checking this BackendService. - // Currently at most one health check can be specified, and a health - // check is required. - // - // For internal load balancing, a URL to a HealthCheck resource must be - // specified instead. - HealthChecks []string `json:"healthChecks,omitempty"` - - Iap *BackendServiceIAP `json:"iap,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of resource. Always compute#backendService - // for backend services. 
- Kind string `json:"kind,omitempty"` - - // Possible values: - // "EXTERNAL" - // "INTERNAL" - // "INVALID_LOAD_BALANCING_SCHEME" - LoadBalancingScheme string `json:"loadBalancingScheme,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Port: Deprecated in favor of portName. The TCP port to connect on the - // backend. The default value is 80. - // - // This cannot be used for internal load balancing. - Port int64 `json:"port,omitempty"` - - // PortName: Name of backend port. The same name should appear in the - // instance groups referenced by this service. Required when the load - // balancing scheme is EXTERNAL. - // - // When the load balancing scheme is INTERNAL, this field is not used. - PortName string `json:"portName,omitempty"` - - // Protocol: The protocol this BackendService uses to communicate with - // backends. - // - // Possible values are HTTP, HTTPS, TCP, and SSL. The default is - // HTTP. - // - // For internal load balancing, the possible values are TCP and UDP, and - // the default is TCP. - // - // Possible values: - // "HTTP" - // "HTTPS" - // "SSL" - // "TCP" - // "UDP" - Protocol string `json:"protocol,omitempty"` - - // Region: [Output Only] URL of the region where the regional backend - // service resides. This field is not applicable to global backend - // services. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // SessionAffinity: Type of session affinity to use. The default is - // NONE. - // - // When the load balancing scheme is EXTERNAL, can be NONE, CLIENT_IP, - // or GENERATED_COOKIE. - // - // When the load balancing scheme is INTERNAL, can be NONE, CLIENT_IP, - // CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO. - // - // When the protocol is UDP, this field is not used. - // - // Possible values: - // "CLIENT_IP" - // "CLIENT_IP_PORT_PROTO" - // "CLIENT_IP_PROTO" - // "GENERATED_COOKIE" - // "NONE" - SessionAffinity string `json:"sessionAffinity,omitempty"` - - // TimeoutSec: How many seconds to wait for the backend before - // considering it a failed request. Default is 30 seconds. - TimeoutSec int64 `json:"timeoutSec,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. - // "AffinityCookieTtlSec") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AffinityCookieTtlSec") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. 
However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *BackendService) MarshalJSON() ([]byte, error) { - type noMethod BackendService - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BackendServiceAggregatedList: Contains a list of -// BackendServicesScopedList. -type BackendServiceAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A map of scoped BackendService lists. - Items map[string]BackendServicesScopedList `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] A token used to continue a truncated - // list request. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BackendServiceAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceAggregatedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BackendServiceCdnPolicy: Message containing Cloud CDN configuration -// for a backend service. -type BackendServiceCdnPolicy struct { - // CacheKeyPolicy: The CacheKeyPolicy for this CdnPolicy. - CacheKeyPolicy *CacheKeyPolicy `json:"cacheKeyPolicy,omitempty"` - - // ForceSendFields is a list of field names (e.g. "CacheKeyPolicy") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CacheKeyPolicy") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. 
This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *BackendServiceCdnPolicy) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceCdnPolicy - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type BackendServiceGroupHealth struct { - HealthStatus []*HealthStatus `json:"healthStatus,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#backendServiceGroupHealth for the health of backend services. - Kind string `json:"kind,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "HealthStatus") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "HealthStatus") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BackendServiceGroupHealth) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceGroupHealth - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BackendServiceIAP: Identity-Aware Proxy -type BackendServiceIAP struct { - Enabled bool `json:"enabled,omitempty"` - - Oauth2ClientId string `json:"oauth2ClientId,omitempty"` - - Oauth2ClientSecret string `json:"oauth2ClientSecret,omitempty"` - - Oauth2ClientSecretSha256 string `json:"oauth2ClientSecretSha256,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Enabled") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Enabled") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BackendServiceIAP) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceIAP - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BackendServiceList: Contains a list of BackendService resources. -type BackendServiceList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of BackendService resources. 
- Items []*BackendService `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#backendServiceList for lists of backend services. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BackendServiceList) MarshalJSON() ([]byte, error) { - type noMethod BackendServiceList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type BackendServicesScopedList struct { - // BackendServices: List of BackendServices contained in this scope. - BackendServices []*BackendService `json:"backendServices,omitempty"` - - // Warning: Informational warning which replaces the list of backend - // services when the list is empty. - Warning *BackendServicesScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "BackendServices") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "BackendServices") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. 
- NullFields []string `json:"-"` -} - -func (s *BackendServicesScopedList) MarshalJSON() ([]byte, error) { - type noMethod BackendServicesScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// BackendServicesScopedListWarning: Informational warning which -// replaces the list of backend services when the list is empty. -type BackendServicesScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*BackendServicesScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BackendServicesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod BackendServicesScopedListWarning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type BackendServicesScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. 
By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *BackendServicesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod BackendServicesScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Binding: Associates `members` with a `role`. -type Binding struct { - // Members: Specifies the identities requesting access for a Cloud - // Platform resource. `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is on - // the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents - // anyone who is authenticated with a Google account or a service - // account. - // - // * `user:{emailid}`: An email address that represents a specific - // Google account. For example, `alice@gmail.com` or - // `joe@example.com`. - // - // - // - // * `serviceAccount:{emailid}`: An email address that represents a - // service account. For example, - // `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google group. - // For example, `admins@example.com`. - // - // * `domain:{domain}`: A Google Apps domain name that represents all - // the users of that domain. For example, `google.com` or `example.com`. - Members []string `json:"members,omitempty"` - - // Role: Role that is assigned to `members`. For example, - // `roles/viewer`, `roles/editor`, or `roles/owner`. - Role string `json:"role,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Members") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Members") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Binding) MarshalJSON() ([]byte, error) { - type noMethod Binding - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type CacheInvalidationRule struct { - // Host: If set, this invalidation rule will only apply to requests with - // a Host header matching host. 
- Host string `json:"host,omitempty"` - - Path string `json:"path,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Host") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Host") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *CacheInvalidationRule) MarshalJSON() ([]byte, error) { - type noMethod CacheInvalidationRule - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// CacheKeyPolicy: Message containing what to include in the cache key -// for a request for Cloud CDN. -type CacheKeyPolicy struct { - // IncludeHost: If true, requests to different hosts will be cached - // separately. - IncludeHost bool `json:"includeHost,omitempty"` - - // IncludeProtocol: If true, http and https requests will be cached - // separately. - IncludeProtocol bool `json:"includeProtocol,omitempty"` - - // IncludeQueryString: If true, include query string parameters in the - // cache key according to query_string_whitelist and - // query_string_blacklist. If neither is set, the entire query string - // will be included. If false, the query string will be excluded from - // the cache key entirely. - IncludeQueryString bool `json:"includeQueryString,omitempty"` - - // QueryStringBlacklist: Names of query string parameters to exclude in - // cache keys. All other parameters will be included. Either specify - // query_string_whitelist or query_string_blacklist, not both. '&' and - // '=' will be percent encoded and not treated as delimiters. - QueryStringBlacklist []string `json:"queryStringBlacklist,omitempty"` - - // QueryStringWhitelist: Names of query string parameters to include in - // cache keys. All other parameters will be excluded. Either specify - // query_string_whitelist or query_string_blacklist, not both. '&' and - // '=' will be percent encoded and not treated as delimiters. - QueryStringWhitelist []string `json:"queryStringWhitelist,omitempty"` - - // ForceSendFields is a list of field names (e.g. "IncludeHost") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "IncludeHost") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. 
- // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *CacheKeyPolicy) MarshalJSON() ([]byte, error) { - type noMethod CacheKeyPolicy - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Commitment: A usage-commitment with a start / end time. Users create -// commitments for particular resources (e.g. memory). Actual usage is -// first deducted from available commitments made prior, perhaps at a -// reduced price (as laid out in the commitment). -type Commitment struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // EndTimestamp: [Output Only] Commitment end time in RFC3339 text - // format. - EndTimestamp string `json:"endTimestamp,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#commitment - // for commitments. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Plan: The plan for this commitment, which determines duration and - // discount rate. The currently supported plans are TWELVE_MONTH (1 - // year), and THIRTY_SIX_MONTH (3 years). - // - // Possible values: - // "INVALID" - // "THIRTY_SIX_MONTH" - // "TWELVE_MONTH" - Plan string `json:"plan,omitempty"` - - // Region: [Output Only] URL of the region where this commitment may be - // used. - Region string `json:"region,omitempty"` - - // Resources: List of commitment amounts for particular resources. Note - // that VCPU and MEMORY resource commitments must occur together. - Resources []*ResourceCommitment `json:"resources,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // StartTimestamp: [Output Only] Commitment start time in RFC3339 text - // format. - StartTimestamp string `json:"startTimestamp,omitempty"` - - // Status: [Output Only] Status of the commitment with regards to - // eventual expiration (each commitment has an end-date defined). One of - // the following values: NOT_YET_ACTIVE, ACTIVE, EXPIRED. - // - // Possible values: - // "ACTIVE" - // "CREATING" - // "EXPIRED" - // "NOT_YET_ACTIVE" - Status string `json:"status,omitempty"` - - // StatusMessage: [Output Only] An optional, human-readable explanation - // of the status. - StatusMessage string `json:"statusMessage,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. 
By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Commitment) MarshalJSON() ([]byte, error) { - type noMethod Commitment - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type CommitmentAggregatedList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: Commitments by scope. - Items map[string]CommitmentsScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#commitmentAggregatedList for aggregated lists of commitments. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *CommitmentAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod CommitmentAggregatedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// CommitmentList: Contains a list of Commitment resources. -type CommitmentList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: A list of Commitment resources. 
- Items []*Commitment `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#commitmentList - // for lists of commitments. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *CommitmentList) MarshalJSON() ([]byte, error) { - type noMethod CommitmentList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type CommitmentsScopedList struct { - // Commitments: [Output Only] List of commitments contained in this - // scope. - Commitments []*Commitment `json:"commitments,omitempty"` - - // Warning: [Output Only] Informational warning which replaces the list - // of commitments when the list is empty. - Warning *CommitmentsScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Commitments") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Commitments") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *CommitmentsScopedList) MarshalJSON() ([]byte, error) { - type noMethod CommitmentsScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// CommitmentsScopedListWarning: [Output Only] Informational warning -// which replaces the list of commitments when the list is empty. -type CommitmentsScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*CommitmentsScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *CommitmentsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod CommitmentsScopedListWarning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type CommitmentsScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. 
By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *CommitmentsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod CommitmentsScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Condition: A condition to be met. -type Condition struct { - // Iam: Trusted attributes supplied by the IAM system. - // - // Possible values: - // "APPROVER" - // "ATTRIBUTION" - // "AUTHORITY" - // "JUSTIFICATION_TYPE" - // "NO_ATTR" - // "SECURITY_REALM" - Iam string `json:"iam,omitempty"` - - // Op: An operator to apply the subject with. - // - // Possible values: - // "DISCHARGED" - // "EQUALS" - // "IN" - // "NOT_EQUALS" - // "NOT_IN" - // "NO_OP" - Op string `json:"op,omitempty"` - - // Svc: Trusted attributes discharged by the service. - Svc string `json:"svc,omitempty"` - - // Sys: Trusted attributes supplied by any service that owns resources - // and uses the IAM system for access control. - // - // Possible values: - // "IP" - // "NAME" - // "NO_ATTR" - // "REGION" - // "SERVICE" - Sys string `json:"sys,omitempty"` - - // Value: DEPRECATED. Use 'values' instead. - Value string `json:"value,omitempty"` - - // Values: The objects of the condition. This is mutually exclusive with - // 'value'. - Values []string `json:"values,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Iam") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Iam") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Condition) MarshalJSON() ([]byte, error) { - type noMethod Condition - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ConnectionDraining: Message containing connection draining -// configuration. -type ConnectionDraining struct { - // DrainingTimeoutSec: Time for which instance will be drained (not - // accept new connections, but still work to finish started). - DrainingTimeoutSec int64 `json:"drainingTimeoutSec,omitempty"` - - // ForceSendFields is a list of field names (e.g. 
"DrainingTimeoutSec") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DrainingTimeoutSec") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ConnectionDraining) MarshalJSON() ([]byte, error) { - type noMethod ConnectionDraining - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// CustomerEncryptionKey: Represents a customer-supplied encryption key -type CustomerEncryptionKey struct { - // RawKey: Specifies a 256-bit customer-supplied encryption key, encoded - // in RFC 4648 base64 to either encrypt or decrypt this resource. - RawKey string `json:"rawKey,omitempty"` - - // RsaEncryptedKey: Specifies an RFC 4648 base64 encoded, RSA-wrapped - // 2048-bit customer-supplied encryption key to either encrypt or - // decrypt this resource. - // - // The key must meet the following requirements before you can provide - // it to Compute Engine: - // - The key is wrapped using a RSA public key certificate provided by - // Google. - // - After being wrapped, the key must be encoded in RFC 4648 base64 - // encoding. Get the RSA public key certificate provided by Google - // at: - // https://cloud-certs.storage.googleapis.com/google-cloud-csek-ingre - // ss.pem - RsaEncryptedKey string `json:"rsaEncryptedKey,omitempty"` - - // Sha256: [Output only] The RFC 4648 base64 encoded SHA-256 hash of the - // customer-supplied encryption key that protects this resource. - Sha256 string `json:"sha256,omitempty"` - - // ForceSendFields is a list of field names (e.g. "RawKey") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "RawKey") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *CustomerEncryptionKey) MarshalJSON() ([]byte, error) { - type noMethod CustomerEncryptionKey - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type CustomerEncryptionKeyProtectedDisk struct { - // DiskEncryptionKey: Decrypts data associated with the disk with a - // customer-supplied encryption key. 
- DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` - - // Source: Specifies a valid partial or full URL to an existing - // Persistent Disk resource. This field is only applicable for - // persistent disks. - Source string `json:"source,omitempty"` - - // ForceSendFields is a list of field names (e.g. "DiskEncryptionKey") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DiskEncryptionKey") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *CustomerEncryptionKeyProtectedDisk) MarshalJSON() ([]byte, error) { - type noMethod CustomerEncryptionKeyProtectedDisk - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// DeprecationStatus: Deprecation status for a public resource. -type DeprecationStatus struct { - // Deleted: An optional RFC3339 timestamp on or after which the state of - // this resource is intended to change to DELETED. This is only - // informational and the status will not change unless the client - // explicitly changes it. - Deleted string `json:"deleted,omitempty"` - - // Deprecated: An optional RFC3339 timestamp on or after which the state - // of this resource is intended to change to DEPRECATED. This is only - // informational and the status will not change unless the client - // explicitly changes it. - Deprecated string `json:"deprecated,omitempty"` - - // Obsolete: An optional RFC3339 timestamp on or after which the state - // of this resource is intended to change to OBSOLETE. This is only - // informational and the status will not change unless the client - // explicitly changes it. - Obsolete string `json:"obsolete,omitempty"` - - // Replacement: The URL of the suggested replacement for a deprecated - // resource. The suggested replacement resource must be the same kind of - // resource as the deprecated resource. - Replacement string `json:"replacement,omitempty"` - - // State: The deprecation state of this resource. This can be - // DEPRECATED, OBSOLETE, or DELETED. Operations which create a new - // resource using a DEPRECATED resource will return successfully, but - // with a warning indicating the deprecated resource and recommending - // its replacement. Operations which use OBSOLETE or DELETED resources - // will be rejected and result in an error. - // - // Possible values: - // "DELETED" - // "DEPRECATED" - // "OBSOLETE" - State string `json:"state,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Deleted") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
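DeprecationStatus, described above, is attached to a public resource (an image, for example) to mark it DEPRECATED, OBSOLETE or DELETED and to point at a replacement. A sketch of applying it, assuming the generated Images.Deprecate method is present in this vendored client; the image names are placeholders:

package example

import compute "google.golang.org/api/compute/v1"

// markDeprecated flags an old image and suggests its replacement.
func markDeprecated(svc *compute.Service, project string) error {
	status := &compute.DeprecationStatus{
		State: "DEPRECATED",
		// Full or partial URL of the suggested replacement image.
		Replacement: "global/images/my-image-v2",
	}
	_, err := svc.Images.Deprecate(project, "my-image-v1", status).Do()
	return err
}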
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Deleted") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *DeprecationStatus) MarshalJSON() ([]byte, error) { - type noMethod DeprecationStatus - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Disk: A Disk resource. -type Disk struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // DiskEncryptionKey: Encrypts the disk using a customer-supplied - // encryption key. - // - // After you encrypt a disk with a customer-supplied key, you must - // provide the same key if you use the disk later (e.g. to create a disk - // snapshot or an image, or to attach the disk to a virtual - // machine). - // - // Customer-supplied encryption keys do not protect access to metadata - // of the disk. - // - // If you do not provide an encryption key when creating the disk, then - // the disk will be encrypted using an automatically generated key and - // you do not need to provide a key to use the disk later. - DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#disk for - // disks. - Kind string `json:"kind,omitempty"` - - // LabelFingerprint: A fingerprint for the labels being applied to this - // disk, which is essentially a hash of the labels set used for - // optimistic locking. The fingerprint is initially generated by Compute - // Engine and changes after every request to modify or update labels. - // You must always provide an up-to-date fingerprint hash in order to - // update or change labels. - // - // To see the latest fingerprint, make a get() request to retrieve a - // disk. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: Labels to apply to this disk. These can be later modified by - // the setLabels method. - Labels map[string]string `json:"labels,omitempty"` - - // LastAttachTimestamp: [Output Only] Last attach timestamp in RFC3339 - // text format. - LastAttachTimestamp string `json:"lastAttachTimestamp,omitempty"` - - // LastDetachTimestamp: [Output Only] Last detach timestamp in RFC3339 - // text format. - LastDetachTimestamp string `json:"lastDetachTimestamp,omitempty"` - - // Licenses: Any applicable publicly visible licenses. - Licenses []string `json:"licenses,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Options: Internal use only. - Options string `json:"options,omitempty"` - - // SelfLink: [Output Only] Server-defined fully-qualified URL for this - // resource. - SelfLink string `json:"selfLink,omitempty"` - - // SizeGb: Size of the persistent disk, specified in GB. You can specify - // this field when creating a persistent disk using the sourceImage or - // sourceSnapshot parameter, or specify it alone to create an empty - // persistent disk. - // - // If you specify this field along with sourceImage or sourceSnapshot, - // the value of sizeGb must not be less than the size of the sourceImage - // or the size of the snapshot. - SizeGb int64 `json:"sizeGb,omitempty,string"` - - // SourceImage: The source image used to create this disk. If the source - // image is deleted, this field will not be set. - // - // To create a disk with one of the public operating system images, - // specify the image by its family name. For example, specify - // family/debian-8 to use the latest Debian 8 - // image: - // - // projects/debian-cloud/global/images/family/debian-8 - // - // Alternatively, use a specific version of a public operating system - // image: - // - // projects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD - // - // To create a disk with a private image that you created, specify the - // image name in the following format: - // - // global/images/my-private-image - // - // You can also specify a private image by its image family, which - // returns the latest version of the image in that family. Replace the - // image name with - // family/family-name: - // - // global/images/family/my-private-family - SourceImage string `json:"sourceImage,omitempty"` - - // SourceImageEncryptionKey: The customer-supplied encryption key of the - // source image. Required if the source image is protected by a - // customer-supplied encryption key. - SourceImageEncryptionKey *CustomerEncryptionKey `json:"sourceImageEncryptionKey,omitempty"` - - // SourceImageId: [Output Only] The ID value of the image used to create - // this disk. This value identifies the exact image that was used to - // create this persistent disk. For example, if you created the - // persistent disk from an image that was later deleted and recreated - // under the same name, the source image ID would identify the exact - // version of the image that was used. - SourceImageId string `json:"sourceImageId,omitempty"` - - // SourceSnapshot: The source snapshot used to create this disk. You can - // provide this as a partial or full URL to the resource. For example, - // the following are valid values: - // - - // https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot - // - projects/project/global/snapshots/snapshot - // - global/snapshots/snapshot - SourceSnapshot string `json:"sourceSnapshot,omitempty"` - - // SourceSnapshotEncryptionKey: The customer-supplied encryption key of - // the source snapshot. Required if the source snapshot is protected by - // a customer-supplied encryption key. - SourceSnapshotEncryptionKey *CustomerEncryptionKey `json:"sourceSnapshotEncryptionKey,omitempty"` - - // SourceSnapshotId: [Output Only] The unique ID of the snapshot used to - // create this disk. 
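The SourceImage comment above lists the accepted image specifications: an image family, a specific public image version, or a private image. A short sketch of creating a disk from a public image family with an explicit size, under the same assumption that the generated Disks.Insert call is available; names are placeholders:

package example

import compute "google.golang.org/api/compute/v1"

// diskFromFamily creates a 20 GB SSD disk from the latest Debian 8 image.
func diskFromFamily(svc *compute.Service, project, zone string) error {
	disk := &compute.Disk{
		Name:        "web-data",
		SizeGb:      20, // must not be smaller than the source image
		SourceImage: "projects/debian-cloud/global/images/family/debian-8",
		Type:        "zones/" + zone + "/diskTypes/pd-ssd",
	}
	_, err := svc.Disks.Insert(project, zone, disk).Do()
	return err
}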
This value identifies the exact snapshot that was - // used to create this persistent disk. For example, if you created the - // persistent disk from a snapshot that was later deleted and recreated - // under the same name, the source snapshot ID would identify the exact - // version of the snapshot that was used. - SourceSnapshotId string `json:"sourceSnapshotId,omitempty"` - - // Status: [Output Only] The status of disk creation. - // - // Possible values: - // "CREATING" - // "FAILED" - // "READY" - // "RESTORING" - Status string `json:"status,omitempty"` - - // StorageType: [Deprecated] Storage type of the persistent disk. - // - // Possible values: - // "HDD" - // "SSD" - StorageType string `json:"storageType,omitempty"` - - // Type: URL of the disk type resource describing which disk type to use - // to create the disk. Provide this when creating the disk. - Type string `json:"type,omitempty"` - - // Users: [Output Only] Links to the users of the disk (attached - // instances) in form: project/zones/zone/instances/instance - Users []string `json:"users,omitempty"` - - // Zone: [Output Only] URL of the zone where the disk resides. - Zone string `json:"zone,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Disk) MarshalJSON() ([]byte, error) { - type noMethod Disk - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type DiskAggregatedList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A map of scoped disk lists. - Items map[string]DisksScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#diskAggregatedList for aggregated lists of persistent disks. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. Acceptable values are 0 to 500, inclusive. (Default: - // 500) - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. 
- SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *DiskAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod DiskAggregatedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// DiskList: A list of Disk resources. -type DiskList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Disk resources. - Items []*Disk `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#diskList for - // lists of disks. - Kind string `json:"kind,omitempty"` - - // NextPageToken: This token allows you to get the next page of results - // for list requests. If the number of results is larger than - // maxResults, use the nextPageToken as a value for the query parameter - // pageToken in the next list request. Subsequent list requests will - // have their own nextPageToken to continue paging through the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *DiskList) MarshalJSON() ([]byte, error) { - type noMethod DiskList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type DiskMoveRequest struct { - // DestinationZone: The URL of the destination zone to move the disk. - // This can be a full or partial URL. 
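NextPageToken, described above for DiskList, is the standard paging handle: pass it back as the pageToken query parameter until it comes back empty. A sketch of that loop, assuming the generated Disks.List call and its PageToken option exist in this vendored client:

package example

import compute "google.golang.org/api/compute/v1"

// allDisks follows NextPageToken until every page has been read.
func allDisks(svc *compute.Service, project, zone string) ([]*compute.Disk, error) {
	var disks []*compute.Disk
	pageToken := ""
	for {
		call := svc.Disks.List(project, zone)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		list, err := call.Do()
		if err != nil {
			return nil, err
		}
		disks = append(disks, list.Items...)
		if list.NextPageToken == "" {
			return disks, nil
		}
		pageToken = list.NextPageToken
	}
}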
For example, the following are all - // valid URLs to a zone: - // - https://www.googleapis.com/compute/v1/projects/project/zones/zone - // - // - projects/project/zones/zone - // - zones/zone - DestinationZone string `json:"destinationZone,omitempty"` - - // TargetDisk: The URL of the target disk to move. This can be a full or - // partial URL. For example, the following are all valid URLs to a disk: - // - // - - // https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk - // - projects/project/zones/zone/disks/disk - // - zones/zone/disks/disk - TargetDisk string `json:"targetDisk,omitempty"` - - // ForceSendFields is a list of field names (e.g. "DestinationZone") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DestinationZone") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *DiskMoveRequest) MarshalJSON() ([]byte, error) { - type noMethod DiskMoveRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// DiskType: A DiskType resource. -type DiskType struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // DefaultDiskSizeGb: [Output Only] Server-defined default disk size in - // GB. - DefaultDiskSizeGb int64 `json:"defaultDiskSizeGb,omitempty,string"` - - // Deprecated: [Output Only] The deprecation status associated with this - // disk type. - Deprecated *DeprecationStatus `json:"deprecated,omitempty"` - - // Description: [Output Only] An optional description of this resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#diskType for - // disk types. - Kind string `json:"kind,omitempty"` - - // Name: [Output Only] Name of the resource. - Name string `json:"name,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // ValidDiskSize: [Output Only] An optional textual description of the - // valid disk size, such as "10GB-10TB". - ValidDiskSize string `json:"validDiskSize,omitempty"` - - // Zone: [Output Only] URL of the zone where the disk type resides. - Zone string `json:"zone,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
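DiskMoveRequest above shows the accepted URL forms for both the target disk and the destination zone. A sketch of using it, assuming the generated Projects.MoveDisk method is part of this vendored copy; the zone and disk values are placeholders in the partial-URL form listed above:

package example

import compute "google.golang.org/api/compute/v1"

// moveDisk asks the API to move a disk to another zone.
func moveDisk(svc *compute.Service, project string) error {
	req := &compute.DiskMoveRequest{
		TargetDisk:      "zones/us-central1-a/disks/my-disk",
		DestinationZone: "zones/us-central1-b",
	}
	_, err := svc.Projects.MoveDisk(project, req).Do()
	return err
}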
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *DiskType) MarshalJSON() ([]byte, error) { - type noMethod DiskType - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type DiskTypeAggregatedList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A map of scoped disk type lists. - Items map[string]DiskTypesScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#diskTypeAggregatedList. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *DiskTypeAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod DiskTypeAggregatedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// DiskTypeList: Contains a list of disk types. -type DiskTypeList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of Disk Type resources. - Items []*DiskType `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#diskTypeList for - // disk types. 
- Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *DiskTypeList) MarshalJSON() ([]byte, error) { - type noMethod DiskTypeList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type DiskTypesScopedList struct { - // DiskTypes: [Output Only] List of disk types contained in this scope. - DiskTypes []*DiskType `json:"diskTypes,omitempty"` - - // Warning: [Output Only] Informational warning which replaces the list - // of disk types when the list is empty. - Warning *DiskTypesScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "DiskTypes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DiskTypes") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *DiskTypesScopedList) MarshalJSON() ([]byte, error) { - type noMethod DiskTypesScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// DiskTypesScopedListWarning: [Output Only] Informational warning which -// replaces the list of disk types when the list is empty. -type DiskTypesScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. 
For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*DiskTypesScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *DiskTypesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod DiskTypesScopedListWarning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type DiskTypesScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
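The scoped-list warning described above replaces the item list whenever a scope is empty, NO_RESULTS_ON_PAGE being the typical code. A sketch of checking it while walking an aggregated list, assuming the generated DiskTypes.AggregatedList call exists; the log output is illustrative:

package example

import (
	"log"

	compute "google.golang.org/api/compute/v1"
)

// listDiskTypes prints disk types per scope, noting empty scopes.
func listDiskTypes(svc *compute.Service, project string) error {
	agg, err := svc.DiskTypes.AggregatedList(project).Do()
	if err != nil {
		return err
	}
	for scope, scoped := range agg.Items {
		if scoped.Warning != nil && scoped.Warning.Code == "NO_RESULTS_ON_PAGE" {
			log.Printf("%s: no disk types", scope)
			continue
		}
		for _, dt := range scoped.DiskTypes {
			log.Printf("%s: %s (%s)", scope, dt.Name, dt.ValidDiskSize)
		}
	}
	return nil
}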
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *DiskTypesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod DiskTypesScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type DisksResizeRequest struct { - // SizeGb: The new size of the persistent disk, which is specified in - // GB. - SizeGb int64 `json:"sizeGb,omitempty,string"` - - // ForceSendFields is a list of field names (e.g. "SizeGb") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "SizeGb") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *DisksResizeRequest) MarshalJSON() ([]byte, error) { - type noMethod DisksResizeRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type DisksScopedList struct { - // Disks: [Output Only] List of disks contained in this scope. - Disks []*Disk `json:"disks,omitempty"` - - // Warning: [Output Only] Informational warning which replaces the list - // of disks when the list is empty. - Warning *DisksScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Disks") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Disks") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *DisksScopedList) MarshalJSON() ([]byte, error) { - type noMethod DisksScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// DisksScopedListWarning: [Output Only] Informational warning which -// replaces the list of disks when the list is empty. -type DisksScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. 
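DisksResizeRequest above carries only the new size in GB; the API only grows a disk, it does not shrink one. A sketch of using it, assuming the generated Disks.Resize call is available:

package example

import compute "google.golang.org/api/compute/v1"

// growDisk resizes a persistent disk to newSizeGb, which must be larger
// than the disk's current size.
func growDisk(svc *compute.Service, project, zone, disk string, newSizeGb int64) error {
	req := &compute.DisksResizeRequest{SizeGb: newSizeGb}
	_, err := svc.Disks.Resize(project, zone, disk, req).Do()
	return err
}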
- // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*DisksScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *DisksScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod DisksScopedListWarning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type DisksScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. 
It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *DisksScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod DisksScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Firewall: Represents a Firewall resource. -type Firewall struct { - // Allowed: The list of ALLOW rules specified by this firewall. Each - // rule specifies a protocol and port-range tuple that describes a - // permitted connection. - Allowed []*FirewallAllowed `json:"allowed,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Denied: The list of DENY rules specified by this firewall. Each rule - // specifies a protocol and port-range tuple that describes a permitted - // connection. - Denied []*FirewallDenied `json:"denied,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // DestinationRanges: If destination ranges are specified, the firewall - // will apply only to traffic that has destination IP address in these - // ranges. These ranges must be expressed in CIDR format. Only IPv4 is - // supported. - DestinationRanges []string `json:"destinationRanges,omitempty"` - - // Direction: Direction of traffic to which this firewall applies; - // default is INGRESS. Note: For INGRESS traffic, it is NOT supported to - // specify destinationRanges; For EGRESS traffic, it is NOT supported to - // specify sourceRanges OR sourceTags. - // - // Possible values: - // "EGRESS" - // "INGRESS" - Direction string `json:"direction,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Ony] Type of the resource. Always compute#firewall for - // firewall rules. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource; provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Network: URL of the network resource for this firewall rule. If not - // specified when creating a firewall rule, the default network is - // used: - // global/networks/default - // If you choose to specify this property, you can specify the network - // as a full or partial URL. For example, the following are all valid - // URLs: - // - - // https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network - // - projects/myproject/global/networks/my-network - // - global/networks/default - Network string `json:"network,omitempty"` - - // Priority: Priority for this rule. This is an integer between 0 and - // 65535, both inclusive. When not specified, the value assumed is 1000. - // Relative priorities determine precedence of conflicting rules. 
Lower - // value of priority implies higher precedence (eg, a rule with priority - // 0 has higher precedence than a rule with priority 1). DENY rules take - // precedence over ALLOW rules having equal priority. - Priority int64 `json:"priority,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // SourceRanges: If source ranges are specified, the firewall will apply - // only to traffic that has source IP address in these ranges. These - // ranges must be expressed in CIDR format. One or both of sourceRanges - // and sourceTags may be set. If both properties are set, the firewall - // will apply to traffic that has source IP address within sourceRanges - // OR the source IP that belongs to a tag listed in the sourceTags - // property. The connection does not need to match both properties for - // the firewall to apply. Only IPv4 is supported. - SourceRanges []string `json:"sourceRanges,omitempty"` - - // SourceTags: If source tags are specified, the firewall will apply - // only to traffic with source IP that belongs to a tag listed in source - // tags. Source tags cannot be used to control traffic to an instance's - // external IP address. Because tags are associated with an instance, - // not an IP address. One or both of sourceRanges and sourceTags may be - // set. If both properties are set, the firewall will apply to traffic - // that has source IP address within sourceRanges OR the source IP that - // belongs to a tag listed in the sourceTags property. The connection - // does not need to match both properties for the firewall to apply. - SourceTags []string `json:"sourceTags,omitempty"` - - // TargetTags: A list of instance tags indicating sets of instances - // located in the network that may make network connections as specified - // in allowed[]. If no targetTags are specified, the firewall rule - // applies to all instances on the specified network. - TargetTags []string `json:"targetTags,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Allowed") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Allowed") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Firewall) MarshalJSON() ([]byte, error) { - type noMethod Firewall - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type FirewallAllowed struct { - // IPProtocol: The IP protocol to which this rule applies. The protocol - // type is required when creating a firewall rule. This value can either - // be one of the following well known protocol strings (tcp, udp, icmp, - // esp, ah, sctp), or the IP protocol number. 
- IPProtocol string `json:"IPProtocol,omitempty"` - - // Ports: An optional list of ports to which this rule applies. This - // field is only applicable for UDP or TCP protocol. Each entry must be - // either an integer or a range. If not specified, this rule applies to - // connections through any port. - // - // Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. - Ports []string `json:"ports,omitempty"` - - // ForceSendFields is a list of field names (e.g. "IPProtocol") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "IPProtocol") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *FirewallAllowed) MarshalJSON() ([]byte, error) { - type noMethod FirewallAllowed - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type FirewallDenied struct { - // IPProtocol: The IP protocol to which this rule applies. The protocol - // type is required when creating a firewall rule. This value can either - // be one of the following well known protocol strings (tcp, udp, icmp, - // esp, ah, sctp), or the IP protocol number. - IPProtocol string `json:"IPProtocol,omitempty"` - - // Ports: An optional list of ports to which this rule applies. This - // field is only applicable for UDP or TCP protocol. Each entry must be - // either an integer or a range. If not specified, this rule applies to - // connections through any port. - // - // Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. - Ports []string `json:"ports,omitempty"` - - // ForceSendFields is a list of field names (e.g. "IPProtocol") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "IPProtocol") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *FirewallDenied) MarshalJSON() ([]byte, error) { - type noMethod FirewallDenied - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// FirewallList: Contains a list of firewalls. -type FirewallList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. 
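FirewallAllowed and FirewallDenied above are the protocol/port tuples that make up a rule, while the Firewall struct earlier in this hunk carries the direction, priority, source ranges and target tags. A sketch combining them, assuming the generated Firewalls.Insert call; the network, tag and CIDR values are placeholders:

package example

import compute "google.golang.org/api/compute/v1"

// allowSSH opens TCP/22 from one CIDR to instances tagged "ssh".
func allowSSH(svc *compute.Service, project string) error {
	fw := &compute.Firewall{
		Name:      "allow-ssh",
		Network:   "global/networks/default",
		Direction: "INGRESS",
		Priority:  1000,
		Allowed: []*compute.FirewallAllowed{
			{IPProtocol: "tcp", Ports: []string{"22"}},
		},
		SourceRanges: []string{"203.0.113.0/24"},
		TargetTags:   []string{"ssh"},
	}
	_, err := svc.Firewalls.Insert(project, fw).Do()
	return err
}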
- Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of Firewall resources. - Items []*Firewall `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#firewallList for - // lists of firewalls. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *FirewallList) MarshalJSON() ([]byte, error) { - type noMethod FirewallList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ForwardingRule: A ForwardingRule resource. A ForwardingRule resource -// specifies which pool of target virtual machines to forward a packet -// to if it matches the given [IPAddress, IPProtocol, ports] tuple. -type ForwardingRule struct { - // IPAddress: The IP address that this forwarding rule is serving on - // behalf of. - // - // For global forwarding rules, the address must be a global IP. For - // regional forwarding rules, the address must live in the same region - // as the forwarding rule. By default, this field is empty and an - // ephemeral IP from the same scope (global or regional) will be - // assigned. - // - // When the load balancing scheme is INTERNAL, this can only be an RFC - // 1918 IP address belonging to the network/subnetwork configured for - // the forwarding rule. A reserved address cannot be used. If the field - // is empty, the IP address will be automatically allocated from the - // internal IP range of the subnetwork or network configured for this - // forwarding rule. Only IPv4 is supported. - IPAddress string `json:"IPAddress,omitempty"` - - // IPProtocol: The IP protocol to which this rule applies. Valid options - // are TCP, UDP, ESP, AH, SCTP or ICMP. - // - // When the load balancing scheme is INTERNAL, only TCP and UDP are - // valid. 
- // - // Possible values: - // "AH" - // "ESP" - // "ICMP" - // "SCTP" - // "TCP" - // "UDP" - IPProtocol string `json:"IPProtocol,omitempty"` - - // BackendService: This field is not used for external load - // balancing. - // - // For internal load balancing, this field identifies the BackendService - // resource to receive the matched traffic. - BackendService string `json:"backendService,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // IpVersion: The IP Version that will be used by this forwarding rule. - // Valid options are IPV4 or IPV6. This can only be specified for a - // global forwarding rule. - // - // Possible values: - // "IPV4" - // "IPV6" - // "UNSPECIFIED_VERSION" - IpVersion string `json:"ipVersion,omitempty"` - - // Kind: [Output Only] Type of the resource. Always - // compute#forwardingRule for Forwarding Rule resources. - Kind string `json:"kind,omitempty"` - - // LoadBalancingScheme: This signifies what the ForwardingRule will be - // used for and can only take the following values: INTERNAL, EXTERNAL - // The value of INTERNAL means that this will be used for Internal - // Network Load Balancing (TCP, UDP). The value of EXTERNAL means that - // this will be used for External Load Balancing (HTTP(S) LB, External - // TCP/UDP LB, SSL Proxy) - // - // Possible values: - // "EXTERNAL" - // "INTERNAL" - // "INVALID" - LoadBalancingScheme string `json:"loadBalancingScheme,omitempty"` - - // Name: Name of the resource; provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Network: This field is not used for external load balancing. - // - // For internal load balancing, this field identifies the network that - // the load balanced IP should belong to for this Forwarding Rule. If - // this field is not specified, the default network will be used. - Network string `json:"network,omitempty"` - - // PortRange: This field is used along with the target field for - // TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, - // TargetVpnGateway, TargetPool, TargetInstance. - // - // Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets - // addressed to ports in the specified range will be forwarded to - // target. Forwarding rules with the same [IPAddress, IPProtocol] pair - // must have disjoint port ranges. - // - // Some types of forwarding target have constraints on the acceptable - // ports: - // - TargetHttpProxy: 80, 8080 - // - TargetHttpsProxy: 443 - // - TargetSslProxy: 443 - // - TargetVpnGateway: 500, 4500 - // - - PortRange string `json:"portRange,omitempty"` - - // Ports: This field is used along with the backend_service field for - // internal load balancing. 
- // - // When the load balancing scheme is INTERNAL, a single port or a comma - // separated list of ports can be configured. Only packets addressed to - // these ports will be forwarded to the backends configured with this - // forwarding rule. - // - // You may specify a maximum of up to 5 ports. - Ports []string `json:"ports,omitempty"` - - // Region: [Output Only] URL of the region where the regional forwarding - // rule resides. This field is not applicable to global forwarding - // rules. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServiceLabel: An optional prefix to the service name for this - // Forwarding Rule. If specified, will be the first label of the fully - // qualified service name. - // - // The label must be 1-63 characters long, and comply with RFC1035. - // Specifically, the label must be 1-63 characters long and match the - // regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first - // character must be a lowercase letter, and all following characters - // must be a dash, lowercase letter, or digit, except the last - // character, which cannot be a dash. - // - // This field is only used for internal load balancing. - ServiceLabel string `json:"serviceLabel,omitempty"` - - // ServiceName: [Output Only] The internal fully qualified service name - // for this Forwarding Rule. - // - // This field is only used for internal load balancing. - ServiceName string `json:"serviceName,omitempty"` - - // Subnetwork: This field is not used for external load balancing. - // - // For internal load balancing, this field identifies the subnetwork - // that the load balanced IP should belong to for this Forwarding - // Rule. - // - // If the network specified is in auto subnet mode, this field is - // optional. However, if the network is in custom subnet mode, a - // subnetwork must be specified. - Subnetwork string `json:"subnetwork,omitempty"` - - // Target: The URL of the target resource to receive the matched - // traffic. For regional forwarding rules, this target must live in the - // same region as the forwarding rule. For global forwarding rules, this - // target must be a global load balancing resource. The forwarded - // traffic must be of a type appropriate to the target object. - // - // This field is not used for internal load balancing. - Target string `json:"target,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "IPAddress") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "IPAddress") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *ForwardingRule) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRule - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ForwardingRuleAggregatedList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: A map of scoped forwarding rule lists. - Items map[string]ForwardingRulesScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#forwardingRuleAggregatedList for lists of forwarding rules. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ForwardingRuleAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRuleAggregatedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ForwardingRuleList: Contains a list of ForwardingRule resources. -type ForwardingRuleList struct { - // Id: [Output Only] Unique identifier for the resource. Set by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of ForwardingRule resources. - Items []*ForwardingRule `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. 
- googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ForwardingRuleList) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRuleList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ForwardingRulesScopedList struct { - // ForwardingRules: List of forwarding rules contained in this scope. - ForwardingRules []*ForwardingRule `json:"forwardingRules,omitempty"` - - // Warning: Informational warning which replaces the list of forwarding - // rules when the list is empty. - Warning *ForwardingRulesScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ForwardingRules") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ForwardingRules") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ForwardingRulesScopedList) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRulesScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ForwardingRulesScopedListWarning: Informational warning which -// replaces the list of forwarding rules when the list is empty. -type ForwardingRulesScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. 
- // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*ForwardingRulesScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ForwardingRulesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRulesScopedListWarning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ForwardingRulesScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ForwardingRulesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod ForwardingRulesScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type GlobalSetLabelsRequest struct { - // LabelFingerprint: The fingerprint of the previous set of labels for - // this resource, used to detect conflicts. The fingerprint is initially - // generated by Compute Engine and changes after every request to modify - // or update labels. You must always provide an up-to-date fingerprint - // hash when updating or changing labels. Make a get() request to the - // resource to get the latest fingerprint. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: A list of labels to apply for this resource. Each label key & - // value must comply with RFC1035. Specifically, the name must be 1-63 - // characters long and match the regular expression - // [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a - // lowercase letter, and all following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. For example, "webserver-frontend": "images". A label value - // can also be empty (e.g. "my-label": ""). - Labels map[string]string `json:"labels,omitempty"` - - // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "LabelFingerprint") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *GlobalSetLabelsRequest) MarshalJSON() ([]byte, error) { - type noMethod GlobalSetLabelsRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// GuestOsFeature: Guest OS features. -type GuestOsFeature struct { - // Type: The type of supported feature. Currenty only - // VIRTIO_SCSI_MULTIQUEUE is supported. For newer Windows images, the - // server might also populate this property with the value WINDOWS to - // indicate that this is a Windows image. This value is purely - // informational and does not enable or disable any features. - // - // Possible values: - // "FEATURE_TYPE_UNSPECIFIED" - // "MULTI_IP_SUBNET" - // "VIRTIO_SCSI_MULTIQUEUE" - // "WINDOWS" - Type string `json:"type,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Type") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Type") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *GuestOsFeature) MarshalJSON() ([]byte, error) { - type noMethod GuestOsFeature - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type HTTPHealthCheck struct { - // Host: The value of the host header in the HTTP health check request. - // If left empty (default value), the IP on behalf of which this health - // check is performed will be used. - Host string `json:"host,omitempty"` - - // Port: The TCP port number for the health check request. The default - // value is 80. Valid values are 1 through 65535. - Port int64 `json:"port,omitempty"` - - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. - PortName string `json:"portName,omitempty"` - - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // RequestPath: The request path of the HTTP health check request. The - // default value is /. - RequestPath string `json:"requestPath,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Host") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Host") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *HTTPHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HTTPHealthCheck - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type HTTPSHealthCheck struct { - // Host: The value of the host header in the HTTPS health check request. - // If left empty (default value), the IP on behalf of which this health - // check is performed will be used. - Host string `json:"host,omitempty"` - - // Port: The TCP port number for the health check request. The default - // value is 443. Valid values are 1 through 65535. - Port int64 `json:"port,omitempty"` - - // PortName: Port name as defined in InstanceGroup#NamedPort#name. 
If - // both port and port_name are defined, port takes precedence. - PortName string `json:"portName,omitempty"` - - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // RequestPath: The request path of the HTTPS health check request. The - // default value is /. - RequestPath string `json:"requestPath,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Host") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Host") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HTTPSHealthCheck - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// HealthCheck: An HealthCheck resource. This resource defines a -// template for how individual virtual machines should be checked for -// health, via one of the supported protocols. -type HealthCheck struct { - // CheckIntervalSec: How often (in seconds) to send a health check. The - // default value is 5 seconds. - CheckIntervalSec int64 `json:"checkIntervalSec,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in 3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // HealthyThreshold: A so-far unhealthy instance will be marked healthy - // after this many consecutive successes. The default value is 2. - HealthyThreshold int64 `json:"healthyThreshold,omitempty"` - - HttpHealthCheck *HTTPHealthCheck `json:"httpHealthCheck,omitempty"` - - HttpsHealthCheck *HTTPSHealthCheck `json:"httpsHealthCheck,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: Type of the resource. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. 
- SelfLink string `json:"selfLink,omitempty"` - - SslHealthCheck *SSLHealthCheck `json:"sslHealthCheck,omitempty"` - - TcpHealthCheck *TCPHealthCheck `json:"tcpHealthCheck,omitempty"` - - // TimeoutSec: How long (in seconds) to wait before claiming failure. - // The default value is 5 seconds. It is invalid for timeoutSec to have - // greater value than checkIntervalSec. - TimeoutSec int64 `json:"timeoutSec,omitempty"` - - // Type: Specifies the type of the healthCheck, either TCP, SSL, HTTP or - // HTTPS. If not specified, the default is TCP. Exactly one of the - // protocol-specific health check field must be specified, which must - // match type field. - // - // Possible values: - // "HTTP" - // "HTTPS" - // "INVALID" - // "SSL" - // "TCP" - // "UDP" - Type string `json:"type,omitempty"` - - UdpHealthCheck *UDPHealthCheck `json:"udpHealthCheck,omitempty"` - - // UnhealthyThreshold: A so-far healthy instance will be marked - // unhealthy after this many consecutive failures. The default value is - // 2. - UnhealthyThreshold int64 `json:"unhealthyThreshold,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CheckIntervalSec") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CheckIntervalSec") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *HealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HealthCheck - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// HealthCheckList: Contains a list of HealthCheck resources. -type HealthCheckList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: A list of HealthCheck resources. - Items []*HealthCheck `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. 
By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *HealthCheckList) MarshalJSON() ([]byte, error) { - type noMethod HealthCheckList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// HealthCheckReference: A full or valid partial URL to a health check. -// For example, the following are valid URLs: -// - -// https://www.googleapis.com/compute/beta/projects/project-id/global/httpHealthChecks/health-check -// - projects/project-id/global/httpHealthChecks/health-check -// - global/httpHealthChecks/health-check -type HealthCheckReference struct { - HealthCheck string `json:"healthCheck,omitempty"` - - // ForceSendFields is a list of field names (e.g. "HealthCheck") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "HealthCheck") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *HealthCheckReference) MarshalJSON() ([]byte, error) { - type noMethod HealthCheckReference - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type HealthStatus struct { - // HealthState: Health state of the instance. - // - // Possible values: - // "HEALTHY" - // "UNHEALTHY" - HealthState string `json:"healthState,omitempty"` - - // Instance: URL of the instance resource. - Instance string `json:"instance,omitempty"` - - // IpAddress: The IP address represented by this resource. - IpAddress string `json:"ipAddress,omitempty"` - - // Port: The port on the instance. - Port int64 `json:"port,omitempty"` - - // ForceSendFields is a list of field names (e.g. "HealthState") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"HealthState") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *HealthStatus) MarshalJSON() ([]byte, error) { - type noMethod HealthStatus - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// HostRule: UrlMaps A host-matching rule for a URL. If matched, will -// use the named PathMatcher to select the BackendService. -type HostRule struct { - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Hosts: The list of host patterns to match. They must be valid - // hostnames, except * will match any string of ([a-z0-9-.]*). In that - // case, * must be the first character and must be followed in the - // pattern by either - or .. - Hosts []string `json:"hosts,omitempty"` - - // PathMatcher: The name of the PathMatcher to use to match the path - // portion of the URL if the hostRule matches the URL's host portion. - PathMatcher string `json:"pathMatcher,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *HostRule) MarshalJSON() ([]byte, error) { - type noMethod HostRule - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// HttpHealthCheck: An HttpHealthCheck resource. This resource defines a -// template for how individual instances should be checked for health, -// via HTTP. -type HttpHealthCheck struct { - // CheckIntervalSec: How often (in seconds) to send a health check. The - // default value is 5 seconds. - CheckIntervalSec int64 `json:"checkIntervalSec,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // HealthyThreshold: A so-far unhealthy instance will be marked healthy - // after this many consecutive successes. The default value is 2. - HealthyThreshold int64 `json:"healthyThreshold,omitempty"` - - // Host: The value of the host header in the HTTP health check request. 
- // If left empty (default value), the public IP on behalf of which this - // health check is performed will be used. - Host string `json:"host,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always - // compute#httpHealthCheck for HTTP health checks. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Port: The TCP port number for the HTTP health check request. The - // default value is 80. - Port int64 `json:"port,omitempty"` - - // RequestPath: The request path of the HTTP health check request. The - // default value is /. - RequestPath string `json:"requestPath,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // TimeoutSec: How long (in seconds) to wait before claiming failure. - // The default value is 5 seconds. It is invalid for timeoutSec to have - // greater value than checkIntervalSec. - TimeoutSec int64 `json:"timeoutSec,omitempty"` - - // UnhealthyThreshold: A so-far healthy instance will be marked - // unhealthy after this many consecutive failures. The default value is - // 2. - UnhealthyThreshold int64 `json:"unhealthyThreshold,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CheckIntervalSec") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CheckIntervalSec") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *HttpHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HttpHealthCheck - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// HttpHealthCheckList: Contains a list of HttpHealthCheck resources. -type HttpHealthCheckList struct { - // Id: [Output Only] Unique identifier for the resource. Defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of HttpHealthCheck resources. - Items []*HttpHealthCheck `json:"items,omitempty"` - - // Kind: Type of resource. 
- Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *HttpHealthCheckList) MarshalJSON() ([]byte, error) { - type noMethod HttpHealthCheckList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// HttpsHealthCheck: An HttpsHealthCheck resource. This resource defines -// a template for how individual instances should be checked for health, -// via HTTPS. -type HttpsHealthCheck struct { - // CheckIntervalSec: How often (in seconds) to send a health check. The - // default value is 5 seconds. - CheckIntervalSec int64 `json:"checkIntervalSec,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // HealthyThreshold: A so-far unhealthy instance will be marked healthy - // after this many consecutive successes. The default value is 2. - HealthyThreshold int64 `json:"healthyThreshold,omitempty"` - - // Host: The value of the host header in the HTTPS health check request. - // If left empty (default value), the public IP on behalf of which this - // health check is performed will be used. - Host string `json:"host,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: Type of the resource. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Port: The TCP port number for the HTTPS health check request. The - // default value is 443. - Port int64 `json:"port,omitempty"` - - // RequestPath: The request path of the HTTPS health check request. The - // default value is "/". - RequestPath string `json:"requestPath,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // TimeoutSec: How long (in seconds) to wait before claiming failure. - // The default value is 5 seconds. It is invalid for timeoutSec to have - // a greater value than checkIntervalSec. - TimeoutSec int64 `json:"timeoutSec,omitempty"` - - // UnhealthyThreshold: A so-far healthy instance will be marked - // unhealthy after this many consecutive failures. The default value is - // 2. - UnhealthyThreshold int64 `json:"unhealthyThreshold,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CheckIntervalSec") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CheckIntervalSec") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *HttpsHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod HttpsHealthCheck - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// HttpsHealthCheckList: Contains a list of HttpsHealthCheck resources. -type HttpsHealthCheckList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of HttpsHealthCheck resources. - Items []*HttpsHealthCheck `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. 
By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *HttpsHealthCheckList) MarshalJSON() ([]byte, error) { - type noMethod HttpsHealthCheckList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Image: An Image resource. -type Image struct { - // ArchiveSizeBytes: Size of the image tar.gz archive stored in Google - // Cloud Storage (in bytes). - ArchiveSizeBytes int64 `json:"archiveSizeBytes,omitempty,string"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Deprecated: The deprecation status associated with this image. - Deprecated *DeprecationStatus `json:"deprecated,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // DiskSizeGb: Size of the image when restored onto a persistent disk - // (in GB). - DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` - - // Family: The name of the image family to which this image belongs. You - // can create disks by specifying an image family instead of a specific - // image name. The image family always returns its latest image that is - // not deprecated. The name of the image family must comply with - // RFC1035. - Family string `json:"family,omitempty"` - - // GuestOsFeatures: A list of features to enable on the guest OS. - // Applicable for bootable images only. Currently, only one feature can - // be enabled, VIRTIO_SCSCI_MULTIQUEUE, which allows each virtual CPU to - // have its own queue. For Windows images, you can only enable - // VIRTIO_SCSCI_MULTIQUEUE on images with driver version 1.2.0.1621 or - // higher. Linux images with kernel versions 3.17 and higher will - // support VIRTIO_SCSCI_MULTIQUEUE. - // - // For new Windows images, the server might also populate this field - // with the value WINDOWS, to indicate that this is a Windows image. - // This value is purely informational and does not enable or disable any - // features. - GuestOsFeatures []*GuestOsFeature `json:"guestOsFeatures,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // ImageEncryptionKey: Encrypts the image using a customer-supplied - // encryption key. - // - // After you encrypt an image with a customer-supplied key, you must - // provide the same key if you use the image later (e.g. to create a - // disk from the image). - // - // Customer-supplied encryption keys do not protect access to metadata - // of the disk. 
- // - // If you do not provide an encryption key when creating the image, then - // the disk will be encrypted using an automatically generated key and - // you do not need to provide a key to use the image later. - ImageEncryptionKey *CustomerEncryptionKey `json:"imageEncryptionKey,omitempty"` - - // Kind: [Output Only] Type of the resource. Always compute#image for - // images. - Kind string `json:"kind,omitempty"` - - // LabelFingerprint: A fingerprint for the labels being applied to this - // image, which is essentially a hash of the labels used for optimistic - // locking. The fingerprint is initially generated by Compute Engine and - // changes after every request to modify or update labels. You must - // always provide an up-to-date fingerprint hash in order to update or - // change labels. - // - // To see the latest fingerprint, make a get() request to retrieve an - // image. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: Labels to apply to this image. These can be later modified by - // the setLabels method. - Labels map[string]string `json:"labels,omitempty"` - - // Licenses: Any applicable license URI. - Licenses []string `json:"licenses,omitempty"` - - // Name: Name of the resource; provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // RawDisk: The parameters of the raw disk image. - RawDisk *ImageRawDisk `json:"rawDisk,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // SourceDisk: URL of the source disk used to create this image. This - // can be a full or valid partial URL. You must provide either this - // property or the rawDisk.source property but not both to create an - // image. For example, the following are valid values: - // - - // https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk - // - projects/project/zones/zone/disks/disk - // - zones/zone/disks/disk - SourceDisk string `json:"sourceDisk,omitempty"` - - // SourceDiskEncryptionKey: The customer-supplied encryption key of the - // source disk. Required if the source disk is protected by a - // customer-supplied encryption key. - SourceDiskEncryptionKey *CustomerEncryptionKey `json:"sourceDiskEncryptionKey,omitempty"` - - // SourceDiskId: The ID value of the disk used to create this image. - // This value may be used to determine whether the image was taken from - // the current or a previous instance of a given disk name. - SourceDiskId string `json:"sourceDiskId,omitempty"` - - // SourceType: The type of the image used to create this disk. The - // default and only value is RAW - // - // Possible values: - // "RAW" (default) - SourceType string `json:"sourceType,omitempty"` - - // Status: [Output Only] The status of the image. An image can be used - // to create other resources, such as instances, only after the image - // has been successfully created and the status is set to READY. - // Possible values are FAILED, PENDING, or READY. 
- // - // Possible values: - // "FAILED" - // "PENDING" - // "READY" - Status string `json:"status,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "ArchiveSizeBytes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ArchiveSizeBytes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Image) MarshalJSON() ([]byte, error) { - type noMethod Image - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ImageRawDisk: The parameters of the raw disk image. -type ImageRawDisk struct { - // ContainerType: The format used to encode and transmit the block - // device, which should be TAR. This is just a container and - // transmission format and not a runtime format. Provided by the client - // when the disk image is created. - // - // Possible values: - // "TAR" - ContainerType string `json:"containerType,omitempty"` - - // Sha1Checksum: An optional SHA1 checksum of the disk image before - // unpackaging; provided by the client when the disk image is created. - Sha1Checksum string `json:"sha1Checksum,omitempty"` - - // Source: The full Google Cloud Storage URL where the disk image is - // stored. You must provide either this property or the sourceDisk - // property but not both. - Source string `json:"source,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ContainerType") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ContainerType") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ImageRawDisk) MarshalJSON() ([]byte, error) { - type noMethod ImageRawDisk - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ImageList: Contains a list of images. -type ImageList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of Image resources. 
- Items []*Image `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ImageList) MarshalJSON() ([]byte, error) { - type noMethod ImageList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Instance: An Instance resource. -type Instance struct { - // CanIpForward: Allows this instance to send and receive packets with - // non-matching destination or source IPs. This is required if you plan - // to use this instance to forward routes. For more information, see - // Enabling IP Forwarding. - CanIpForward bool `json:"canIpForward,omitempty"` - - // CpuPlatform: [Output Only] The CPU platform used by this instance. - CpuPlatform string `json:"cpuPlatform,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Disks: Array of disks associated with this instance. Persistent disks - // must be created before you can assign them. - Disks []*AttachedDisk `json:"disks,omitempty"` - - // GuestAccelerators: List of the type and count of accelerator cards - // attached to the instance. - GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#instance for - // instances. - Kind string `json:"kind,omitempty"` - - // LabelFingerprint: A fingerprint for this request, which is - // essentially a hash of the metadata's contents and used for optimistic - // locking. 
The fingerprint is initially generated by Compute Engine and - // changes after every request to modify or update metadata. You must - // always provide an up-to-date fingerprint hash in order to update or - // change metadata. - // - // To see the latest fingerprint, make get() request to the instance. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: Labels to apply to this instance. These can be later modified - // by the setLabels method. - Labels map[string]string `json:"labels,omitempty"` - - // MachineType: Full or partial URL of the machine type resource to use - // for this instance, in the format: - // zones/zone/machineTypes/machine-type. This is provided by the client - // when the instance is created. For example, the following is a valid - // partial url to a predefined machine - // type: - // - // zones/us-central1-f/machineTypes/n1-standard-1 - // - // To create a custom machine type, provide a URL to a machine type in - // the following format, where CPUS is 1 or an even number up to 32 (2, - // 4, 6, ... 24, etc), and MEMORY is the total memory for this instance. - // Memory must be a multiple of 256 MB and must be supplied in MB (e.g. - // 5 GB of memory is 5120 - // MB): - // - // zones/zone/machineTypes/custom-CPUS-MEMORY - // - // For example: zones/us-central1-f/machineTypes/custom-4-5120 - // - // For a full list of restrictions, read the Specifications for custom - // machine types. - MachineType string `json:"machineType,omitempty"` - - // Metadata: The metadata key/value pairs assigned to this instance. - // This includes custom metadata and predefined keys. - Metadata *Metadata `json:"metadata,omitempty"` - - // Name: The name of the resource, provided by the client when initially - // creating the resource. The resource name must be 1-63 characters - // long, and comply with RFC1035. Specifically, the name must be 1-63 - // characters long and match the regular expression - // [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a - // lowercase letter, and all following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. - Name string `json:"name,omitempty"` - - // NetworkInterfaces: An array of configurations for this interface. - // This specifies how this interface is configured to interact with - // other network services, such as connecting to the internet. Only one - // interface is supported per instance. - NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` - - // Scheduling: Sets the scheduling options for this instance. - Scheduling *Scheduling `json:"scheduling,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServiceAccounts: A list of service accounts, with their specified - // scopes, authorized for this instance. Only one service account per VM - // instance is supported. - // - // Service accounts generate access tokens that can be accessed through - // the metadata server and used to authenticate applications on the - // instance. See Service Accounts for more information. - ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"` - - // Status: [Output Only] The status of the instance. One of the - // following values: PROVISIONING, STAGING, RUNNING, STOPPING, - // SUSPENDING, SUSPENDED, and TERMINATED. 
- // - // Possible values: - // "PROVISIONING" - // "RUNNING" - // "STAGING" - // "STOPPED" - // "STOPPING" - // "SUSPENDED" - // "SUSPENDING" - // "TERMINATED" - Status string `json:"status,omitempty"` - - // StatusMessage: [Output Only] An optional, human-readable explanation - // of the status. - StatusMessage string `json:"statusMessage,omitempty"` - - // Tags: A list of tags to apply to this instance. Tags are used to - // identify valid sources or targets for network firewalls and are - // specified by the client during instance creation. The tags can be - // later modified by the setTags method. Each tag within the list must - // comply with RFC1035. - Tags *Tags `json:"tags,omitempty"` - - // Zone: [Output Only] URL of the zone where the instance resides. - Zone string `json:"zone,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CanIpForward") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CanIpForward") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Instance) MarshalJSON() ([]byte, error) { - type noMethod Instance - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceAggregatedList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A map of scoped instance lists. - Items map[string]InstancesScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#instanceAggregatedList for aggregated lists of Instance - // resources. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceAggregatedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroup struct { - // CreationTimestamp: [Output Only] The creation timestamp for this - // instance group in RFC3339 text format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Fingerprint: [Output Only] The fingerprint of the named ports. The - // system uses this fingerprint to detect conflicts when multiple users - // change the named ports concurrently. - Fingerprint string `json:"fingerprint,omitempty"` - - // Id: [Output Only] A unique identifier for this instance group, - // generated by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] The resource type, which is always - // compute#instanceGroup for instance groups. - Kind string `json:"kind,omitempty"` - - // Name: The name of the instance group. The name must be 1-63 - // characters long, and comply with RFC1035. - Name string `json:"name,omitempty"` - - // NamedPorts: Assigns a name to a port number. For example: {name: - // "http", port: 80} - // - // This allows the system to reference ports by the assigned name - // instead of a port number. Named ports can also contain multiple - // ports. For example: [{name: "http", port: 80},{name: "http", port: - // 8080}] - // - // Named ports apply to all instances in this instance group. - NamedPorts []*NamedPort `json:"namedPorts,omitempty"` - - // Network: The URL of the network to which all instances in the - // instance group belong. - Network string `json:"network,omitempty"` - - // Region: The URL of the region where the instance group is located - // (for regional resources). - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] The URL for this instance group. The server - // generates this URL. - SelfLink string `json:"selfLink,omitempty"` - - // Size: [Output Only] The total number of instances in the instance - // group. - Size int64 `json:"size,omitempty"` - - // Subnetwork: The URL of the subnetwork to which all instances in the - // instance group belong. - Subnetwork string `json:"subnetwork,omitempty"` - - // Zone: [Output Only] The URL of the zone where the instance group is - // located (for zonal resources). - Zone string `json:"zone,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroup) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroup - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupAggregatedList struct { - // Id: [Output Only] A unique identifier for this aggregated list of - // instance groups. The server generates this identifier. - Id string `json:"id,omitempty"` - - // Items: A map of scoped instance group lists. - Items map[string]InstanceGroupsScopedList `json:"items,omitempty"` - - // Kind: [Output Only] The resource type, which is always - // compute#instanceGroupAggregatedList for aggregated lists of instance - // groups. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] The URL for this resource type. The server - // generates this URL. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupAggregatedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// InstanceGroupList: A list of InstanceGroup resources. -type InstanceGroupList struct { - // Id: [Output Only] A unique identifier for this list of instance - // groups. The server generates this identifier. - Id string `json:"id,omitempty"` - - // Items: A list of instance groups. 
- Items []*InstanceGroup `json:"items,omitempty"` - - // Kind: [Output Only] The resource type, which is always - // compute#instanceGroupList for instance group lists. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] The URL for this resource type. The server - // generates this URL. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// InstanceGroupManager: An Instance Group Manager resource. -type InstanceGroupManager struct { - // AutoHealingPolicies: The autohealing policy for this managed instance - // group. You can specify only one value. - AutoHealingPolicies []*InstanceGroupManagerAutoHealingPolicy `json:"autoHealingPolicies,omitempty"` - - // BaseInstanceName: The base instance name to use for instances in this - // group. The value must be 1-58 characters long. Instances are named by - // appending a hyphen and a random four-character string to the base - // instance name. The base instance name must comply with RFC1035. - BaseInstanceName string `json:"baseInstanceName,omitempty"` - - // CreationTimestamp: [Output Only] The creation timestamp for this - // managed instance group in RFC3339 text format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // CurrentActions: [Output Only] The list of instance actions and the - // number of instances in this managed instance group that are scheduled - // for each of those actions. - CurrentActions *InstanceGroupManagerActionsSummary `json:"currentActions,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // FailoverAction: The action to perform in case of zone failure. Only - // one value is supported, NO_FAILOVER. The default is NO_FAILOVER. 
- // - // Possible values: - // "NO_FAILOVER" - // "UNKNOWN" - FailoverAction string `json:"failoverAction,omitempty"` - - // Fingerprint: [Output Only] The fingerprint of the resource data. You - // can use this optional field for optimistic locking when you update - // the resource. - Fingerprint string `json:"fingerprint,omitempty"` - - // Id: [Output Only] A unique identifier for this resource type. The - // server generates this identifier. - Id uint64 `json:"id,omitempty,string"` - - // InstanceGroup: [Output Only] The URL of the Instance Group resource. - InstanceGroup string `json:"instanceGroup,omitempty"` - - // InstanceTemplate: The URL of the instance template that is specified - // for this managed instance group. The group uses this template to - // create all new instances in the managed instance group. - InstanceTemplate string `json:"instanceTemplate,omitempty"` - - // Kind: [Output Only] The resource type, which is always - // compute#instanceGroupManager for managed instance groups. - Kind string `json:"kind,omitempty"` - - // Name: The name of the managed instance group. The name must be 1-63 - // characters long, and comply with RFC1035. - Name string `json:"name,omitempty"` - - // NamedPorts: Named ports configured for the Instance Groups - // complementary to this Instance Group Manager. - NamedPorts []*NamedPort `json:"namedPorts,omitempty"` - - // Region: [Output Only] The URL of the region where the managed - // instance group resides (for regional resources). - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] The URL for this managed instance group. The - // server defines this URL. - SelfLink string `json:"selfLink,omitempty"` - - // ServiceAccount: Service account will be used as credentials for all - // operations performed by managed instance group on instances. The - // service accounts needs all permissions required to create and delete - // instances. When not specified, the service account - // {projectNumber}@cloudservices.gserviceaccount.com will be used. - ServiceAccount string `json:"serviceAccount,omitempty"` - - // TargetPools: The URLs for all TargetPool resources to which instances - // in the instanceGroup field are added. The target pools automatically - // apply to all of the instances in the managed instance group. - TargetPools []string `json:"targetPools,omitempty"` - - // TargetSize: The target number of running instances for this managed - // instance group. Deleting or abandoning instances reduces this number. - // Resizing the group changes this number. - TargetSize int64 `json:"targetSize,omitempty"` - - // Zone: [Output Only] The URL of the zone where the managed instance - // group is located (for zonal resources). - Zone string `json:"zone,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "AutoHealingPolicies") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AutoHealingPolicies") to - // include in API requests with the JSON null value. 
By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupManager) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManager - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupManagerActionsSummary struct { - // Abandoning: [Output Only] The total number of instances in the - // managed instance group that are scheduled to be abandoned. Abandoning - // an instance removes it from the managed instance group without - // deleting it. - Abandoning int64 `json:"abandoning,omitempty"` - - // Creating: [Output Only] The number of instances in the managed - // instance group that are scheduled to be created or are currently - // being created. If the group fails to create any of these instances, - // it tries again until it creates the instance successfully. - // - // If you have disabled creation retries, this field will not be - // populated; instead, the creatingWithoutRetries field will be - // populated. - Creating int64 `json:"creating,omitempty"` - - // CreatingWithoutRetries: [Output Only] The number of instances that - // the managed instance group will attempt to create. The group attempts - // to create each instance only once. If the group fails to create any - // of these instances, it decreases the group's targetSize value - // accordingly. - CreatingWithoutRetries int64 `json:"creatingWithoutRetries,omitempty"` - - // Deleting: [Output Only] The number of instances in the managed - // instance group that are scheduled to be deleted or are currently - // being deleted. - Deleting int64 `json:"deleting,omitempty"` - - // None: [Output Only] The number of instances in the managed instance - // group that are running and have no scheduled actions. - None int64 `json:"none,omitempty"` - - // Recreating: [Output Only] The number of instances in the managed - // instance group that are scheduled to be recreated or are currently - // being being recreated. Recreating an instance deletes the existing - // root persistent disk and creates a new disk from the image that is - // defined in the instance template. - Recreating int64 `json:"recreating,omitempty"` - - // Refreshing: [Output Only] The number of instances in the managed - // instance group that are being reconfigured with properties that do - // not require a restart or a recreate action. For example, setting or - // removing target pools for the instance. - Refreshing int64 `json:"refreshing,omitempty"` - - // Restarting: [Output Only] The number of instances in the managed - // instance group that are scheduled to be restarted or are currently - // being restarted. - Restarting int64 `json:"restarting,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Abandoning") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"Abandoning") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupManagerActionsSummary) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerActionsSummary - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupManagerAggregatedList struct { - // Id: [Output Only] A unique identifier for this aggregated list of - // managed instance groups. The server generates this identifier. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A map of filtered managed instance group lists. - Items map[string]InstanceGroupManagersScopedList `json:"items,omitempty"` - - // Kind: [Output Only] The resource type, which is always - // compute#instanceGroupManagerAggregatedList for an aggregated list of - // managed instance groups. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] The URL for this resource type. The server - // generates this URL. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupManagerAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerAggregatedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupManagerAutoHealingPolicy struct { - // HealthCheck: The URL for the health check that signals autohealing. - HealthCheck string `json:"healthCheck,omitempty"` - - // InitialDelaySec: The number of seconds that the managed instance - // group waits before it applies autohealing policies to new instances - // or recently recreated instances. 
This initial delay allows instances - // to initialize and run their startup scripts before the instance group - // determines that they are UNHEALTHY. This prevents the managed - // instance group from recreating its instances prematurely. This value - // must be from range [0, 3600]. - InitialDelaySec int64 `json:"initialDelaySec,omitempty"` - - // ForceSendFields is a list of field names (e.g. "HealthCheck") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "HealthCheck") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupManagerAutoHealingPolicy) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerAutoHealingPolicy - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// InstanceGroupManagerList: [Output Only] A list of managed instance -// groups. -type InstanceGroupManagerList struct { - // Id: [Output Only] A unique identifier for this resource type. The - // server generates this identifier. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of managed instance groups. - Items []*InstanceGroupManager `json:"items,omitempty"` - - // Kind: [Output Only] The resource type, which is always - // compute#instanceGroupManagerList for a list of managed instance - // groups. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. 
- // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupManagerList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupManagersAbandonInstancesRequest struct { - // Instances: The URLs of one or more instances to abandon. This can be - // a full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersAbandonInstancesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupManagersDeleteInstancesRequest struct { - // Instances: The URLs of one or more instances to delete. This can be a - // full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersDeleteInstancesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupManagersListManagedInstancesResponse struct { - // ManagedInstances: [Output Only] The list of instances in the managed - // instance group. - ManagedInstances []*ManagedInstance `json:"managedInstances,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. 
If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "ManagedInstances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ManagedInstances") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupManagersListManagedInstancesResponse) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersListManagedInstancesResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupManagersRecreateInstancesRequest struct { - // Instances: The URLs of one or more instances to recreate. This can be - // a full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupManagersRecreateInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersRecreateInstancesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupManagersResizeAdvancedRequest struct { - // NoCreationRetries: If this flag is true, the managed instance group - // attempts to create all instances initiated by this resize request - // only once. If there is an error during creation, the managed instance - // group does not retry create this instance, and we will decrease the - // targetSize of the request instead. 
If the flag is false, the group - // attemps to recreate each instance continuously until it - // succeeds. - // - // This flag matters only in the first attempt of creation of an - // instance. After an instance is successfully created while this flag - // is enabled, the instance behaves the same way as all the other - // instances created with a regular resize request. In particular, if a - // running instance dies unexpectedly at a later time and needs to be - // recreated, this mode does not affect the recreation behavior in that - // scenario. - // - // This flag is applicable only to the current resize request. It does - // not influence other resize requests in any way. - // - // You can see which instances is being creating in which mode by - // calling the get or listManagedInstances API. - NoCreationRetries bool `json:"noCreationRetries,omitempty"` - - // TargetSize: The number of running instances that the managed instance - // group should maintain at any given time. The group automatically adds - // or removes instances to maintain the number of instances specified by - // this parameter. - TargetSize int64 `json:"targetSize,omitempty"` - - // ForceSendFields is a list of field names (e.g. "NoCreationRetries") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "NoCreationRetries") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupManagersResizeAdvancedRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersResizeAdvancedRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupManagersScopedList struct { - // InstanceGroupManagers: [Output Only] The list of managed instance - // groups that are contained in the specified project and zone. - InstanceGroupManagers []*InstanceGroupManager `json:"instanceGroupManagers,omitempty"` - - // Warning: [Output Only] The warning that replaces the list of managed - // instance groups when the list is empty. - Warning *InstanceGroupManagersScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "InstanceGroupManagers") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "InstanceGroupManagers") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. 
However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupManagersScopedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// InstanceGroupManagersScopedListWarning: [Output Only] The warning -// that replaces the list of managed instance groups when the list is -// empty. -type InstanceGroupManagersScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*InstanceGroupManagersScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupManagersScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersScopedListWarning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupManagersScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. 
Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupManagersScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupManagersSetAutoHealingRequest struct { - AutoHealingPolicies []*InstanceGroupManagerAutoHealingPolicy `json:"autoHealingPolicies,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AutoHealingPolicies") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AutoHealingPolicies") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupManagersSetAutoHealingRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersSetAutoHealingRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupManagersSetInstanceTemplateRequest struct { - // InstanceTemplate: The URL of the instance template that is specified - // for this managed instance group. The group uses this template to - // create all new instances in the managed instance group. - InstanceTemplate string `json:"instanceTemplate,omitempty"` - - // ForceSendFields is a list of field names (e.g. "InstanceTemplate") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "InstanceTemplate") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupManagersSetInstanceTemplateRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersSetInstanceTemplateRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupManagersSetTargetPoolsRequest struct { - // Fingerprint: The fingerprint of the target pools information. Use - // this optional property to prevent conflicts when multiple users - // change the target pools settings concurrently. Obtain the fingerprint - // with the instanceGroupManagers.get method. Then, include the - // fingerprint in your request to ensure that you do not overwrite - // changes that were applied from another concurrent request. - Fingerprint string `json:"fingerprint,omitempty"` - - // TargetPools: The list of target pool URLs that instances in this - // managed instance group belong to. The managed instance group applies - // these target pools to all of the instances in the group. Existing - // instances and new instances in the group all receive these target - // pool settings. - TargetPools []string `json:"targetPools,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Fingerprint") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Fingerprint") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagersSetTargetPoolsRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupsAddInstancesRequest struct { - // Instances: The list of instances to add to the instance group. - Instances []*InstanceReference `json:"instances,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupsAddInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsAddInstancesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupsListInstances struct { - // Id: [Output Only] A unique identifier for this list of instances in - // the specified instance group. The server generates this identifier. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of instances and any named ports that are - // assigned to those instances. - Items []*InstanceWithNamedPorts `json:"items,omitempty"` - - // Kind: [Output Only] The resource type, which is always - // compute#instanceGroupsListInstances for the list of instances in the - // specified instance group. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] The URL for this list of instances in the - // specified instance groups. The server generates this URL. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupsListInstances) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsListInstances - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupsListInstancesRequest struct { - // InstanceState: A filter for the state of the instances in the - // instance group. 
Valid options are ALL or RUNNING. If you do not - // specify this parameter the list includes all instances regardless of - // their state. - // - // Possible values: - // "ALL" - // "RUNNING" - InstanceState string `json:"instanceState,omitempty"` - - // ForceSendFields is a list of field names (e.g. "InstanceState") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "InstanceState") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsListInstancesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupsRemoveInstancesRequest struct { - // Instances: The list of instances to remove from the instance group. - Instances []*InstanceReference `json:"instances,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupsRemoveInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsRemoveInstancesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupsScopedList struct { - // InstanceGroups: [Output Only] The list of instance groups that are - // contained in this scope. - InstanceGroups []*InstanceGroup `json:"instanceGroups,omitempty"` - - // Warning: [Output Only] An informational warning that replaces the - // list of instance groups when the list is empty. - Warning *InstanceGroupsScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "InstanceGroups") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "InstanceGroups") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupsScopedList) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// InstanceGroupsScopedListWarning: [Output Only] An informational -// warning that replaces the list of instance groups when the list is -// empty. -type InstanceGroupsScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*InstanceGroupsScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsScopedListWarning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupsScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. 
For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceGroupsSetNamedPortsRequest struct { - // Fingerprint: The fingerprint of the named ports information for this - // instance group. Use this optional property to prevent conflicts when - // multiple users change the named ports settings concurrently. Obtain - // the fingerprint with the instanceGroups.get method. Then, include the - // fingerprint in your request to ensure that you do not overwrite - // changes that were applied from another concurrent request. - Fingerprint string `json:"fingerprint,omitempty"` - - // NamedPorts: The list of named ports to set for this instance group. - NamedPorts []*NamedPort `json:"namedPorts,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Fingerprint") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Fingerprint") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *InstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupsSetNamedPortsRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// InstanceList: Contains a list of instances. -type InstanceList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of instances. - Items []*Instance `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#instanceList for - // lists of Instance resources. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceList) MarshalJSON() ([]byte, error) { - type noMethod InstanceList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// InstanceListReferrers: Contains a list of instance referrers. -type InstanceListReferrers struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of referrers. - Items []*Reference `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#instanceListReferrers for lists of Instance referrers. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. 
- SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceListReferrers) MarshalJSON() ([]byte, error) { - type noMethod InstanceListReferrers - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceMoveRequest struct { - // DestinationZone: The URL of the destination zone to move the - // instance. This can be a full or partial URL. For example, the - // following are all valid URLs to a zone: - // - https://www.googleapis.com/compute/v1/projects/project/zones/zone - // - // - projects/project/zones/zone - // - zones/zone - DestinationZone string `json:"destinationZone,omitempty"` - - // TargetInstance: The URL of the target instance to move. This can be a - // full or partial URL. For example, the following are all valid URLs to - // an instance: - // - - // https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance - // - projects/project/zones/zone/instances/instance - // - zones/zone/instances/instance - TargetInstance string `json:"targetInstance,omitempty"` - - // ForceSendFields is a list of field names (e.g. "DestinationZone") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DestinationZone") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *InstanceMoveRequest) MarshalJSON() ([]byte, error) { - type noMethod InstanceMoveRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceProperties struct { - // CanIpForward: Enables instances created based on this template to - // send packets with source IP addresses other than their own and - // receive packets with destination IP addresses other than their own. 
- // If these instances will be used as an IP gateway or it will be set as - // the next-hop in a Route resource, specify true. If unsure, leave this - // set to false. See the Enable IP forwarding for instances - // documentation for more information. - CanIpForward bool `json:"canIpForward,omitempty"` - - // Description: An optional text description for the instances that are - // created from this instance template. - Description string `json:"description,omitempty"` - - // Disks: An array of disks that are associated with the instances that - // are created from this template. - Disks []*AttachedDisk `json:"disks,omitempty"` - - // GuestAccelerators: A list of guest accelerator cards' type and count - // to use for instances created from the instance template. - GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` - - // Labels: Labels to apply to instances that are created from this - // template. - Labels map[string]string `json:"labels,omitempty"` - - // MachineType: The machine type to use for instances that are created - // from this template. - MachineType string `json:"machineType,omitempty"` - - // Metadata: The metadata key/value pairs to assign to instances that - // are created from this template. These pairs can consist of custom - // metadata or predefined keys. See Project and instance metadata for - // more information. - Metadata *Metadata `json:"metadata,omitempty"` - - // NetworkInterfaces: An array of network access configurations for this - // interface. - NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` - - // Scheduling: Specifies the scheduling options for the instances that - // are created from this template. - Scheduling *Scheduling `json:"scheduling,omitempty"` - - // ServiceAccounts: A list of service accounts with specified scopes. - // Access tokens for these service accounts are available to the - // instances that are created from this template. Use metadata queries - // to obtain the access tokens for these instances. - ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"` - - // Tags: A list of tags to apply to the instances that are created from - // this template. The tags identify valid sources or targets for network - // firewalls. The setTags method can modify this list of tags. Each tag - // within the list must comply with RFC1035. - Tags *Tags `json:"tags,omitempty"` - - // ForceSendFields is a list of field names (e.g. "CanIpForward") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CanIpForward") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *InstanceProperties) MarshalJSON() ([]byte, error) { - type noMethod InstanceProperties - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceReference struct { - // Instance: The URL for a specific instance. - Instance string `json:"instance,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Instance") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Instance") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceReference) MarshalJSON() ([]byte, error) { - type noMethod InstanceReference - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// InstanceTemplate: An Instance Template resource. -type InstanceTemplate struct { - // CreationTimestamp: [Output Only] The creation timestamp for this - // instance template in RFC3339 text format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] A unique identifier for this instance template. The - // server defines this identifier. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] The resource type, which is always - // compute#instanceTemplate for instance templates. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource; provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Properties: The instance properties for this instance template. - Properties *InstanceProperties `json:"properties,omitempty"` - - // SelfLink: [Output Only] The URL for this instance template. The - // server defines this URL. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *InstanceTemplate) MarshalJSON() ([]byte, error) { - type noMethod InstanceTemplate - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// InstanceTemplateList: A list of instance templates. -type InstanceTemplateList struct { - // Id: [Output Only] A unique identifier for this instance template. The - // server defines this identifier. - Id string `json:"id,omitempty"` - - // Items: [Output Only] list of InstanceTemplate resources. - Items []*InstanceTemplate `json:"items,omitempty"` - - // Kind: [Output Only] The resource type, which is always - // compute#instanceTemplatesListResponse for instance template lists. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] The URL for this instance template list. The - // server defines this URL. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceTemplateList) MarshalJSON() ([]byte, error) { - type noMethod InstanceTemplateList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstanceWithNamedPorts struct { - // Instance: [Output Only] The URL of the instance. - Instance string `json:"instance,omitempty"` - - // NamedPorts: [Output Only] The named ports that belong to this - // instance group. - NamedPorts []*NamedPort `json:"namedPorts,omitempty"` - - // Status: [Output Only] The status of the instance. 
- // - // Possible values: - // "PROVISIONING" - // "RUNNING" - // "STAGING" - // "STOPPED" - // "STOPPING" - // "SUSPENDED" - // "SUSPENDING" - // "TERMINATED" - Status string `json:"status,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Instance") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Instance") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstanceWithNamedPorts) MarshalJSON() ([]byte, error) { - type noMethod InstanceWithNamedPorts - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstancesScopedList struct { - // Instances: [Output Only] List of instances contained in this scope. - Instances []*Instance `json:"instances,omitempty"` - - // Warning: [Output Only] Informational warning which replaces the list - // of instances when the list is empty. - Warning *InstancesScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstancesScopedList) MarshalJSON() ([]byte, error) { - type noMethod InstancesScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// InstancesScopedListWarning: [Output Only] Informational warning which -// replaces the list of instances when the list is empty. -type InstancesScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. 
- // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*InstancesScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstancesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod InstancesScopedListWarning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstancesScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. 
It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstancesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod InstancesScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstancesSetLabelsRequest struct { - // LabelFingerprint: Fingerprint of the previous set of labels for this - // resource, used to prevent conflicts. Provide the latest fingerprint - // value when making a request to add or change labels. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - Labels map[string]string `json:"labels,omitempty"` - - // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "LabelFingerprint") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *InstancesSetLabelsRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesSetLabelsRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstancesSetMachineResourcesRequest struct { - // GuestAccelerators: List of the type and count of accelerator cards - // attached to the instance. - GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` - - // ForceSendFields is a list of field names (e.g. "GuestAccelerators") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "GuestAccelerators") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *InstancesSetMachineResourcesRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesSetMachineResourcesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstancesSetMachineTypeRequest struct { - // MachineType: Full or partial URL of the machine type resource. See - // Machine Types for a full list of machine types. 
For example: - // zones/us-central1-f/machineTypes/n1-standard-1 - MachineType string `json:"machineType,omitempty"` - - // ForceSendFields is a list of field names (e.g. "MachineType") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "MachineType") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstancesSetMachineTypeRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesSetMachineTypeRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstancesSetServiceAccountRequest struct { - // Email: Email address of the service account. - Email string `json:"email,omitempty"` - - // Scopes: The list of scopes to be made available for this service - // account. - Scopes []string `json:"scopes,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Email") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Email") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstancesSetServiceAccountRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesSetServiceAccountRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InstancesStartWithEncryptionKeyRequest struct { - // Disks: Array of disks associated with this instance that are - // protected with a customer-supplied encryption key. - // - // In order to start the instance, the disk url and its corresponding - // key must be provided. - // - // If the disk is not protected with a customer-supplied encryption key - // it should not be specified. - Disks []*CustomerEncryptionKeyProtectedDisk `json:"disks,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Disks") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
- ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Disks") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InstancesStartWithEncryptionKeyRequest) MarshalJSON() ([]byte, error) { - type noMethod InstancesStartWithEncryptionKeyRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// License: A license resource. -type License struct { - // ChargesUseFee: [Output Only] If true, the customer will be charged - // license fee for running software that contains this license on an - // instance. - ChargesUseFee bool `json:"chargesUseFee,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#license for - // licenses. - Kind string `json:"kind,omitempty"` - - // Name: [Output Only] Name of the resource. The name is 1-63 characters - // long and complies with RFC1035. - Name string `json:"name,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "ChargesUseFee") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ChargesUseFee") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *License) MarshalJSON() ([]byte, error) { - type noMethod License - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// LogConfig: Specifies what kind of log the caller must write -type LogConfig struct { - // Counter: Counter options. - Counter *LogConfigCounterOptions `json:"counter,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Counter") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Counter") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. 
It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *LogConfig) MarshalJSON() ([]byte, error) { - type noMethod LogConfig - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// LogConfigCounterOptions: Options for counters -type LogConfigCounterOptions struct { - // Field: The field value to attribute. - Field string `json:"field,omitempty"` - - // Metric: The metric to update. - Metric string `json:"metric,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Field") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Field") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *LogConfigCounterOptions) MarshalJSON() ([]byte, error) { - type noMethod LogConfigCounterOptions - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// MachineType: A Machine Type resource. -type MachineType struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Deprecated: [Output Only] The deprecation status associated with this - // machine type. - Deprecated *DeprecationStatus `json:"deprecated,omitempty"` - - // Description: [Output Only] An optional textual description of the - // resource. - Description string `json:"description,omitempty"` - - // GuestCpus: [Output Only] The number of virtual CPUs that are - // available to the instance. - GuestCpus int64 `json:"guestCpus,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // IsSharedCpu: [Output Only] Whether this machine type has a shared - // CPU. See Shared-core machine types for more information. - IsSharedCpu bool `json:"isSharedCpu,omitempty"` - - // Kind: [Output Only] The type of the resource. Always - // compute#machineType for machine types. - Kind string `json:"kind,omitempty"` - - // MaximumPersistentDisks: [Output Only] Maximum persistent disks - // allowed. - MaximumPersistentDisks int64 `json:"maximumPersistentDisks,omitempty"` - - // MaximumPersistentDisksSizeGb: [Output Only] Maximum total persistent - // disks size (GB) allowed. - MaximumPersistentDisksSizeGb int64 `json:"maximumPersistentDisksSizeGb,omitempty,string"` - - // MemoryMb: [Output Only] The amount of physical memory available to - // the instance, defined in MB. - MemoryMb int64 `json:"memoryMb,omitempty"` - - // Name: [Output Only] Name of the resource. - Name string `json:"name,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. 
- SelfLink string `json:"selfLink,omitempty"` - - // Zone: [Output Only] The name of the zone where the machine type - // resides, such as us-central1-a. - Zone string `json:"zone,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *MachineType) MarshalJSON() ([]byte, error) { - type noMethod MachineType - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type MachineTypeAggregatedList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A map of scoped machine type lists. - Items map[string]MachineTypesScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#machineTypeAggregatedList for aggregated lists of machine - // types. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *MachineTypeAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod MachineTypeAggregatedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// MachineTypeList: Contains a list of machine types. -type MachineTypeList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of Machine Type resources. - Items []*MachineType `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#machineTypeList - // for lists of machine types. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *MachineTypeList) MarshalJSON() ([]byte, error) { - type noMethod MachineTypeList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type MachineTypesScopedList struct { - // MachineTypes: [Output Only] List of machine types contained in this - // scope. - MachineTypes []*MachineType `json:"machineTypes,omitempty"` - - // Warning: [Output Only] An informational warning that appears when the - // machine types list is empty. - Warning *MachineTypesScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "MachineTypes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "MachineTypes") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. 
However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *MachineTypesScopedList) MarshalJSON() ([]byte, error) { - type noMethod MachineTypesScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// MachineTypesScopedListWarning: [Output Only] An informational warning -// that appears when the machine types list is empty. -type MachineTypesScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*MachineTypesScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *MachineTypesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod MachineTypesScopedListWarning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type MachineTypesScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). 
- Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *MachineTypesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod MachineTypesScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ManagedInstance struct { - // CurrentAction: [Output Only] The current action that the managed - // instance group has scheduled for the instance. Possible values: - // - NONE The instance is running, and the managed instance group does - // not have any scheduled actions for this instance. - // - CREATING The managed instance group is creating this instance. If - // the group fails to create this instance, it will try again until it - // is successful. - // - CREATING_WITHOUT_RETRIES The managed instance group is attempting - // to create this instance only once. If the group fails to create this - // instance, it does not try again and the group's targetSize value is - // decreased instead. - // - RECREATING The managed instance group is recreating this instance. - // - // - DELETING The managed instance group is permanently deleting this - // instance. - // - ABANDONING The managed instance group is abandoning this instance. - // The instance will be removed from the instance group and from any - // target pools that are associated with this group. - // - RESTARTING The managed instance group is restarting the instance. - // - // - REFRESHING The managed instance group is applying configuration - // changes to the instance without stopping it. For example, the group - // can update the target pool list for an instance without stopping that - // instance. - // - // Possible values: - // "ABANDONING" - // "CREATING" - // "CREATING_WITHOUT_RETRIES" - // "DELETING" - // "NONE" - // "RECREATING" - // "REFRESHING" - // "RESTARTING" - CurrentAction string `json:"currentAction,omitempty"` - - // Id: [Output only] The unique identifier for this resource. This field - // is empty when instance does not exist. - Id uint64 `json:"id,omitempty,string"` - - // Instance: [Output Only] The URL of the instance. The URL can exist - // even if the instance has not yet been created. - Instance string `json:"instance,omitempty"` - - // InstanceStatus: [Output Only] The status of the instance. This field - // is empty when the instance does not exist. 
- // - // Possible values: - // "PROVISIONING" - // "RUNNING" - // "STAGING" - // "STOPPED" - // "STOPPING" - // "SUSPENDED" - // "SUSPENDING" - // "TERMINATED" - InstanceStatus string `json:"instanceStatus,omitempty"` - - // LastAttempt: [Output Only] Information about the last attempt to - // create or delete the instance. - LastAttempt *ManagedInstanceLastAttempt `json:"lastAttempt,omitempty"` - - // Version: [Output Only] Intended version of this instance. - Version *ManagedInstanceVersion `json:"version,omitempty"` - - // ForceSendFields is a list of field names (e.g. "CurrentAction") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CurrentAction") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ManagedInstance) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstance - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ManagedInstanceLastAttempt struct { - // Errors: [Output Only] Encountered errors during the last attempt to - // create or delete the instance. - Errors *ManagedInstanceLastAttemptErrors `json:"errors,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Errors") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Errors") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ManagedInstanceLastAttempt) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceLastAttempt - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ManagedInstanceLastAttemptErrors: [Output Only] Encountered errors -// during the last attempt to create or delete the instance. -type ManagedInstanceLastAttemptErrors struct { - // Errors: [Output Only] The array of errors encountered while - // processing this operation. - Errors []*ManagedInstanceLastAttemptErrorsErrors `json:"errors,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Errors") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Errors") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ManagedInstanceLastAttemptErrors) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceLastAttemptErrors - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ManagedInstanceLastAttemptErrorsErrors struct { - // Code: [Output Only] The error type identifier for this error. - Code string `json:"code,omitempty"` - - // Location: [Output Only] Indicates the field in the request that - // caused the error. This property is optional. - Location string `json:"location,omitempty"` - - // Message: [Output Only] An optional, human-readable error message. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ManagedInstanceLastAttemptErrorsErrors) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceLastAttemptErrorsErrors - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ManagedInstanceVersion struct { - // InstanceTemplate: [Output Only] The intended template of the - // instance. This field is empty when current_action is one of { - // DELETING, ABANDONING }. - InstanceTemplate string `json:"instanceTemplate,omitempty"` - - // Name: [Output Only] Name of the version. - Name string `json:"name,omitempty"` - - // ForceSendFields is a list of field names (e.g. "InstanceTemplate") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "InstanceTemplate") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. 
However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ManagedInstanceVersion) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceVersion - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Metadata: A metadata key/value entry. -type Metadata struct { - // Fingerprint: Specifies a fingerprint for this request, which is - // essentially a hash of the metadata's contents and used for optimistic - // locking. The fingerprint is initially generated by Compute Engine and - // changes after every request to modify or update metadata. You must - // always provide an up-to-date fingerprint hash in order to update or - // change metadata. - Fingerprint string `json:"fingerprint,omitempty"` - - // Items: Array of key/value pairs. The total size of all keys and - // values must be less than 512 KB. - Items []*MetadataItems `json:"items,omitempty"` - - // Kind: [Output Only] Type of the resource. Always compute#metadata for - // metadata. - Kind string `json:"kind,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Fingerprint") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Fingerprint") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Metadata) MarshalJSON() ([]byte, error) { - type noMethod Metadata - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type MetadataItems struct { - // Key: Key for the metadata entry. Keys must conform to the following - // regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is - // reflected as part of a URL in the metadata server. Additionally, to - // avoid ambiguity, keys must not conflict with any other metadata keys - // for the project. - Key string `json:"key,omitempty"` - - // Value: Value for the metadata entry. These are free-form strings, and - // only have meaning as interpreted by the image running in the - // instance. The only restriction placed on values is that their size - // must be less than or equal to 32768 bytes. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *MetadataItems) MarshalJSON() ([]byte, error) { - type noMethod MetadataItems - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// NamedPort: The named port. For example: . -type NamedPort struct { - // Name: The name for this named port. The name must be 1-63 characters - // long, and comply with RFC1035. - Name string `json:"name,omitempty"` - - // Port: The port number, which can be a value between 1 and 65535. - Port int64 `json:"port,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Name") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Name") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *NamedPort) MarshalJSON() ([]byte, error) { - type noMethod NamedPort - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Network: Represents a Network resource. Read Networks and Firewalls -// for more information. -type Network struct { - // IPv4Range: The range of internal addresses that are legal on this - // network. This range is a CIDR specification, for example: - // 192.168.0.0/16. Provided by the client when the network is created. - IPv4Range string `json:"IPv4Range,omitempty"` - - // AutoCreateSubnetworks: When set to true, the network is created in - // "auto subnet mode". When set to false, the network is in "custom - // subnet mode". - // - // In "auto subnet mode", a newly created network is assigned the - // default CIDR of 10.128.0.0/9 and it automatically creates one - // subnetwork per region. - AutoCreateSubnetworks bool `json:"autoCreateSubnetworks,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // GatewayIPv4: A gateway address for default routing to other networks. - // This value is read only and is selected by the Google Compute Engine, - // typically as the first usable address in the IPv4Range. - GatewayIPv4 string `json:"gatewayIPv4,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. 
- Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#network for - // networks. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Peerings: [Output Only] List of network peerings for the resource. - Peerings []*NetworkPeering `json:"peerings,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Subnetworks: [Output Only] Server-defined fully-qualified URLs for - // all subnetworks in this network. - Subnetworks []string `json:"subnetworks,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "IPv4Range") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "IPv4Range") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Network) MarshalJSON() ([]byte, error) { - type noMethod Network - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// NetworkInterface: A network interface resource attached to an -// instance. -type NetworkInterface struct { - // AccessConfigs: An array of configurations for this interface. - // Currently, only one access config, ONE_TO_ONE_NAT, is supported. If - // there are no accessConfigs specified, then this instance will have no - // external internet access. - AccessConfigs []*AccessConfig `json:"accessConfigs,omitempty"` - - // AliasIpRanges: An array of alias IP ranges for this network - // interface. Can only be specified for network interfaces on - // subnet-mode networks. - AliasIpRanges []*AliasIpRange `json:"aliasIpRanges,omitempty"` - - // Kind: [Output Only] Type of the resource. Always - // compute#networkInterface for network interfaces. - Kind string `json:"kind,omitempty"` - - // Name: [Output Only] The name of the network interface, generated by - // the server. For network devices, these are eth0, eth1, etc. - Name string `json:"name,omitempty"` - - // Network: URL of the network resource for this instance. 
When creating - // an instance, if neither the network nor the subnetwork is specified, - // the default network global/networks/default is used; if the network - // is not specified but the subnetwork is specified, the network is - // inferred. - // - // This field is optional when creating a firewall rule. If not - // specified when creating a firewall rule, the default network - // global/networks/default is used. - // - // If you specify this property, you can specify the network as a full - // or partial URL. For example, the following are all valid URLs: - // - - // https://www.googleapis.com/compute/v1/projects/project/global/networks/network - // - projects/project/global/networks/network - // - global/networks/default - Network string `json:"network,omitempty"` - - // NetworkIP: An IPv4 internal network address to assign to the instance - // for this network interface. If not specified by the user, an unused - // internal IP is assigned by the system. - NetworkIP string `json:"networkIP,omitempty"` - - // Subnetwork: The URL of the Subnetwork resource for this instance. If - // the network resource is in legacy mode, do not provide this property. - // If the network is in auto subnet mode, providing the subnetwork is - // optional. If the network is in custom subnet mode, then this field - // should be specified. If you specify this property, you can specify - // the subnetwork as a full or partial URL. For example, the following - // are all valid URLs: - // - - // https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork - // - regions/region/subnetworks/subnetwork - Subnetwork string `json:"subnetwork,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AccessConfigs") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AccessConfigs") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *NetworkInterface) MarshalJSON() ([]byte, error) { - type noMethod NetworkInterface - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// NetworkList: Contains a list of networks. -type NetworkList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of Network resources. - Items []*Network `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#networkList for - // lists of networks. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. 
Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *NetworkList) MarshalJSON() ([]byte, error) { - type noMethod NetworkList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// NetworkPeering: A network peering attached to a network resource. The -// message includes the peering name, peer network, peering state, and a -// flag indicating whether Google Compute Engine should automatically -// create routes for the peering. -type NetworkPeering struct { - // AutoCreateRoutes: Whether full mesh connectivity is created and - // managed automatically. When it is set to true, Google Compute Engine - // will automatically create and manage the routes between two networks - // when the state is ACTIVE. Otherwise, user needs to create routes - // manually to route packets to peer network. - AutoCreateRoutes bool `json:"autoCreateRoutes,omitempty"` - - // Name: Name of this peering. Provided by the client when the peering - // is created. The name must comply with RFC1035. Specifically, the name - // must be 1-63 characters long and match regular expression - // [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a - // lowercase letter, and all the following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. - Name string `json:"name,omitempty"` - - // Network: The URL of the peer network. It can be either full URL or - // partial URL. The peer network may belong to a different project. If - // the partial URL does not contain project, it is assumed that the peer - // network is in the same project as the current network. - Network string `json:"network,omitempty"` - - // State: [Output Only] State for the peering. - // - // Possible values: - // "ACTIVE" - // "INACTIVE" - State string `json:"state,omitempty"` - - // StateDetails: [Output Only] Details about the current state of the - // peering. - StateDetails string `json:"stateDetails,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AutoCreateRoutes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AutoCreateRoutes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *NetworkPeering) MarshalJSON() ([]byte, error) { - type noMethod NetworkPeering - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type NetworksAddPeeringRequest struct { - // AutoCreateRoutes: Whether Google Compute Engine manages the routes - // automatically. - AutoCreateRoutes bool `json:"autoCreateRoutes,omitempty"` - - // Name: Name of the peering, which should conform to RFC1035. - Name string `json:"name,omitempty"` - - // PeerNetwork: URL of the peer network. It can be either full URL or - // partial URL. The peer network may belong to a different project. If - // the partial URL does not contain project, it is assumed that the peer - // network is in the same project as the current network. - PeerNetwork string `json:"peerNetwork,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AutoCreateRoutes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AutoCreateRoutes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *NetworksAddPeeringRequest) MarshalJSON() ([]byte, error) { - type noMethod NetworksAddPeeringRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type NetworksRemovePeeringRequest struct { - // Name: Name of the peering, which should conform to RFC1035. - Name string `json:"name,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Name") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Name") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *NetworksRemovePeeringRequest) MarshalJSON() ([]byte, error) { - type noMethod NetworksRemovePeeringRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Operation: An Operation resource, used to manage asynchronous API -// requests. -type Operation struct { - // ClientOperationId: [Output Only] Reserved for future use. - ClientOperationId string `json:"clientOperationId,omitempty"` - - // CreationTimestamp: [Deprecated] This field is deprecated. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: [Output Only] A textual description of the operation, - // which is set when the operation is created. - Description string `json:"description,omitempty"` - - // EndTime: [Output Only] The time that this operation was completed. - // This value is in RFC3339 text format. - EndTime string `json:"endTime,omitempty"` - - // Error: [Output Only] If errors are generated during processing of the - // operation, this field will be populated. - Error *OperationError `json:"error,omitempty"` - - // HttpErrorMessage: [Output Only] If the operation fails, this field - // contains the HTTP error message that was returned, such as NOT FOUND. - HttpErrorMessage string `json:"httpErrorMessage,omitempty"` - - // HttpErrorStatusCode: [Output Only] If the operation fails, this field - // contains the HTTP error status code that was returned. For example, a - // 404 means the resource was not found. - HttpErrorStatusCode int64 `json:"httpErrorStatusCode,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // InsertTime: [Output Only] The time that this operation was requested. - // This value is in RFC3339 text format. - InsertTime string `json:"insertTime,omitempty"` - - // Kind: [Output Only] Type of the resource. Always compute#operation - // for Operation resources. - Kind string `json:"kind,omitempty"` - - // Name: [Output Only] Name of the resource. - Name string `json:"name,omitempty"` - - // OperationType: [Output Only] The type of operation, such as insert, - // update, or delete, and so on. - OperationType string `json:"operationType,omitempty"` - - // Progress: [Output Only] An optional progress indicator that ranges - // from 0 to 100. There is no requirement that this be linear or support - // any granularity of operations. This should not be used to guess when - // the operation will be complete. This number should monotonically - // increase as the operation progresses. - Progress int64 `json:"progress,omitempty"` - - // Region: [Output Only] The URL of the region where the operation - // resides. Only available when performing regional operations. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // StartTime: [Output Only] The time that this operation was started by - // the server. This value is in RFC3339 text format. - StartTime string `json:"startTime,omitempty"` - - // Status: [Output Only] The status of the operation, which can be one - // of the following: PENDING, RUNNING, or DONE. 
- // - // Possible values: - // "DONE" - // "PENDING" - // "RUNNING" - Status string `json:"status,omitempty"` - - // StatusMessage: [Output Only] An optional textual description of the - // current status of the operation. - StatusMessage string `json:"statusMessage,omitempty"` - - // TargetId: [Output Only] The unique target ID, which identifies a - // specific incarnation of the target resource. - TargetId uint64 `json:"targetId,omitempty,string"` - - // TargetLink: [Output Only] The URL of the resource that the operation - // modifies. For operations related to creating a snapshot, this points - // to the persistent disk that the snapshot was created from. - TargetLink string `json:"targetLink,omitempty"` - - // User: [Output Only] User who requested the operation, for example: - // user@example.com. - User string `json:"user,omitempty"` - - // Warnings: [Output Only] If warning messages are generated during - // processing of the operation, this field will be populated. - Warnings []*OperationWarnings `json:"warnings,omitempty"` - - // Zone: [Output Only] The URL of the zone where the operation resides. - // Only available when performing per-zone operations. - Zone string `json:"zone,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "ClientOperationId") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ClientOperationId") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Operation) MarshalJSON() ([]byte, error) { - type noMethod Operation - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// OperationError: [Output Only] If errors are generated during -// processing of the operation, this field will be populated. -type OperationError struct { - // Errors: [Output Only] The array of errors encountered while - // processing this operation. - Errors []*OperationErrorErrors `json:"errors,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Errors") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Errors") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. 
It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *OperationError) MarshalJSON() ([]byte, error) { - type noMethod OperationError - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type OperationErrorErrors struct { - // Code: [Output Only] The error type identifier for this error. - Code string `json:"code,omitempty"` - - // Location: [Output Only] Indicates the field in the request that - // caused the error. This property is optional. - Location string `json:"location,omitempty"` - - // Message: [Output Only] An optional, human-readable error message. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *OperationErrorErrors) MarshalJSON() ([]byte, error) { - type noMethod OperationErrorErrors - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type OperationWarnings struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*OperationWarningsData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *OperationWarnings) MarshalJSON() ([]byte, error) { - type noMethod OperationWarnings - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type OperationWarningsData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *OperationWarningsData) MarshalJSON() ([]byte, error) { - type noMethod OperationWarningsData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type OperationAggregatedList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A map of scoped operation lists. - Items map[string]OperationsScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#operationAggregatedList for aggregated lists of operations. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. 
- googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *OperationAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod OperationAggregatedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// OperationList: Contains a list of Operation resources. -type OperationList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of Operation resources. - Items []*Operation `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#operations for - // Operations resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *OperationList) MarshalJSON() ([]byte, error) { - type noMethod OperationList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type OperationsScopedList struct { - // Operations: [Output Only] List of operations contained in this scope. 
- Operations []*Operation `json:"operations,omitempty"` - - // Warning: [Output Only] Informational warning which replaces the list - // of operations when the list is empty. - Warning *OperationsScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Operations") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Operations") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *OperationsScopedList) MarshalJSON() ([]byte, error) { - type noMethod OperationsScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// OperationsScopedListWarning: [Output Only] Informational warning -// which replaces the list of operations when the list is empty. -type OperationsScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*OperationsScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *OperationsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod OperationsScopedListWarning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type OperationsScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *OperationsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod OperationsScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// PathMatcher: A matcher for the path portion of the URL. The -// BackendService from the longest-matched rule will serve the URL. If -// no rule was matched, the default service will be used. -type PathMatcher struct { - // DefaultService: The full or partial URL to the BackendService - // resource. This will be used if none of the pathRules defined by this - // PathMatcher is matched by the URL's path portion. For example, the - // following are all valid URLs to a BackendService resource: - // - - // https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService - // - compute/v1/projects/project/global/backendServices/backendService - // - // - global/backendServices/backendService - DefaultService string `json:"defaultService,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Name: The name to which this PathMatcher is referred by the HostRule. - Name string `json:"name,omitempty"` - - // PathRules: The list of path rules. - PathRules []*PathRule `json:"pathRules,omitempty"` - - // ForceSendFields is a list of field names (e.g. "DefaultService") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DefaultService") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *PathMatcher) MarshalJSON() ([]byte, error) { - type noMethod PathMatcher - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// PathRule: A path-matching rule for a URL. If matched, will use the -// specified BackendService to handle the traffic arriving at this URL. -type PathRule struct { - // Paths: The list of path patterns to match. Each must start with / and - // the only place a * is allowed is at the end following a /. The string - // fed to the path matcher does not include any text after the first ? - // or #, and those chars are not allowed here. - Paths []string `json:"paths,omitempty"` - - // Service: The URL of the BackendService resource if this rule is - // matched. - Service string `json:"service,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Paths") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Paths") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *PathRule) MarshalJSON() ([]byte, error) { - type noMethod PathRule - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Policy: Defines an Identity and Access Management (IAM) policy. It is -// used to specify access control policies for Cloud Platform -// resources. -// -// -// -// A `Policy` consists of a list of `bindings`. A `Binding` binds a list -// of `members` to a `role`, where the members can be user accounts, -// Google groups, Google domains, and service accounts. A `role` is a -// named list of permissions defined by IAM. -// -// **Example** -// -// { "bindings": [ { "role": "roles/owner", "members": [ -// "user:mike@example.com", "group:admins@example.com", -// "domain:google.com", -// "serviceAccount:my-other-app@appspot.gserviceaccount.com", ] }, { -// "role": "roles/viewer", "members": ["user:sean@example.com"] } ] -// } -// -// For a description of IAM and its features, see the [IAM developer's -// guide](https://cloud.google.com/iam). 
-type Policy struct { - // AuditConfigs: Specifies cloud audit logging configuration for this - // policy. - AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` - - // Bindings: Associates a list of `members` to a `role`. Multiple - // `bindings` must not be specified for the same `role`. `bindings` with - // no members will result in an error. - Bindings []*Binding `json:"bindings,omitempty"` - - // Etag: `etag` is used for optimistic concurrency control as a way to - // help prevent simultaneous updates of a policy from overwriting each - // other. It is strongly suggested that systems make use of the `etag` - // in the read-modify-write cycle to perform policy updates in order to - // avoid race conditions: An `etag` is returned in the response to - // `getIamPolicy`, and systems are expected to put that etag in the - // request to `setIamPolicy` to ensure that their change will be applied - // to the same version of the policy. - // - // If no `etag` is provided in the call to `setIamPolicy`, then the - // existing policy is overwritten blindly. - Etag string `json:"etag,omitempty"` - - IamOwned bool `json:"iamOwned,omitempty"` - - // Rules: If more than one rule is specified, the rules are applied in - // the following manner: - All matching LOG rules are always applied. - - // If any DENY/DENY_WITH_LOG rule matches, permission is denied. Logging - // will be applied if one or more matching rule requires logging. - - // Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is - // granted. Logging will be applied if one or more matching rule - // requires logging. - Otherwise, if no rule applies, permission is - // denied. - Rules []*Rule `json:"rules,omitempty"` - - // Version: Version of the `Policy`. The default version is 0. - Version int64 `json:"version,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "AuditConfigs") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AuditConfigs") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Policy) MarshalJSON() ([]byte, error) { - type noMethod Policy - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Project: A Project resource. Projects can only be created in the -// Google Cloud Platform Console. Unless marked otherwise, values can -// only be modified in the console. -type Project struct { - // CommonInstanceMetadata: Metadata key/value pairs available to all - // instances contained in this project. See Custom metadata for more - // information. 
- CommonInstanceMetadata *Metadata `json:"commonInstanceMetadata,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // DefaultServiceAccount: [Output Only] Default service account used by - // VMs running in this project. - DefaultServiceAccount string `json:"defaultServiceAccount,omitempty"` - - // Description: An optional textual description of the resource. - Description string `json:"description,omitempty"` - - // EnabledFeatures: Restricted features enabled for use on this project. - EnabledFeatures []string `json:"enabledFeatures,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. This is not the project ID, and - // is just a unique ID used by Compute Engine to identify resources. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#project for - // projects. - Kind string `json:"kind,omitempty"` - - // Name: The project ID. For example: my-example-project. Use the - // project ID to make requests to Compute Engine. - Name string `json:"name,omitempty"` - - // Quotas: [Output Only] Quotas assigned to this project. - Quotas []*Quota `json:"quotas,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // UsageExportLocation: The naming prefix for daily usage reports and - // the Google Cloud Storage bucket where they are stored. - UsageExportLocation *UsageExportLocation `json:"usageExportLocation,omitempty"` - - // XpnProjectStatus: [Output Only] The role this project has in a Cross - // Project Network (XPN) configuration. Currently only HOST projects are - // differentiated. - // - // Possible values: - // "HOST" - // "UNSPECIFIED_XPN_PROJECT_STATUS" - XpnProjectStatus string `json:"xpnProjectStatus,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. - // "CommonInstanceMetadata") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CommonInstanceMetadata") - // to include in API requests with the JSON null value. By default, - // fields with empty values are omitted from API requests. However, any - // field with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Project) MarshalJSON() ([]byte, error) { - type noMethod Project - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ProjectsDisableXpnResourceRequest struct { - // XpnResource: XPN resource ID. - XpnResource *XpnResourceId `json:"xpnResource,omitempty"` - - // ForceSendFields is a list of field names (e.g. "XpnResource") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "XpnResource") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ProjectsDisableXpnResourceRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsDisableXpnResourceRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ProjectsEnableXpnResourceRequest struct { - // XpnResource: XPN resource ID. - XpnResource *XpnResourceId `json:"xpnResource,omitempty"` - - // ForceSendFields is a list of field names (e.g. "XpnResource") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "XpnResource") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ProjectsEnableXpnResourceRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsEnableXpnResourceRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ProjectsGetXpnResources struct { - // Kind: [Output Only] Type of resource. Always - // compute#projectsGetXpnResources for lists of XPN resources. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // Resources: XPN resources attached to this project as their XPN host. - Resources []*XpnResourceId `json:"resources,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
- ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ProjectsGetXpnResources) MarshalJSON() ([]byte, error) { - type noMethod ProjectsGetXpnResources - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ProjectsListXpnHostsRequest struct { - // Organization: Optional organization ID managed by Cloud Resource - // Manager, for which to list XPN host projects. If not specified, the - // organization will be inferred from the project. - Organization string `json:"organization,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Organization") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Organization") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ProjectsListXpnHostsRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsListXpnHostsRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Quota: A quotas entry. -type Quota struct { - // Limit: [Output Only] Quota limit for this metric. - Limit float64 `json:"limit,omitempty"` - - // Metric: [Output Only] Name of the quota metric. - // - // Possible values: - // "AUTOSCALERS" - // "BACKEND_BUCKETS" - // "BACKEND_SERVICES" - // "COMMITMENTS" - // "CPUS" - // "CPUS_ALL_REGIONS" - // "DISKS_TOTAL_GB" - // "FIREWALLS" - // "FORWARDING_RULES" - // "HEALTH_CHECKS" - // "IMAGES" - // "INSTANCES" - // "INSTANCE_GROUPS" - // "INSTANCE_GROUP_MANAGERS" - // "INSTANCE_TEMPLATES" - // "IN_USE_ADDRESSES" - // "LOCAL_SSD_TOTAL_GB" - // "NETWORKS" - // "NVIDIA_K80_GPUS" - // "PREEMPTIBLE_CPUS" - // "REGIONAL_AUTOSCALERS" - // "REGIONAL_INSTANCE_GROUP_MANAGERS" - // "ROUTERS" - // "ROUTES" - // "SNAPSHOTS" - // "SSD_TOTAL_GB" - // "SSL_CERTIFICATES" - // "STATIC_ADDRESSES" - // "SUBNETWORKS" - // "TARGET_HTTPS_PROXIES" - // "TARGET_HTTP_PROXIES" - // "TARGET_INSTANCES" - // "TARGET_POOLS" - // "TARGET_SSL_PROXIES" - // "TARGET_VPN_GATEWAYS" - // "URL_MAPS" - // "VPN_TUNNELS" - Metric string `json:"metric,omitempty"` - - // Usage: [Output Only] Current usage of this metric. - Usage float64 `json:"usage,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Limit") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Limit") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Quota) MarshalJSON() ([]byte, error) { - type noMethod Quota - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *Quota) UnmarshalJSON(data []byte) error { - type noMethod Quota - var s1 struct { - Limit gensupport.JSONFloat64 `json:"limit"` - Usage gensupport.JSONFloat64 `json:"usage"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.Limit = float64(s1.Limit) - s.Usage = float64(s1.Usage) - return nil -} - -// Reference: Represents a reference to a resource. -type Reference struct { - // Kind: [Output Only] Type of the resource. Always compute#reference - // for references. - Kind string `json:"kind,omitempty"` - - // ReferenceType: A description of the reference type with no implied - // semantics. Possible values include: - // - MEMBER_OF - ReferenceType string `json:"referenceType,omitempty"` - - // Referrer: URL of the resource which refers to the target. - Referrer string `json:"referrer,omitempty"` - - // Target: URL of the resource to which this reference points. - Target string `json:"target,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Reference) MarshalJSON() ([]byte, error) { - type noMethod Reference - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Region: Region resource. -type Region struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Deprecated: [Output Only] The deprecation status associated with this - // region. - Deprecated *DeprecationStatus `json:"deprecated,omitempty"` - - // Description: [Output Only] Textual description of the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. 
This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#region for - // regions. - Kind string `json:"kind,omitempty"` - - // Name: [Output Only] Name of the resource. - Name string `json:"name,omitempty"` - - // Quotas: [Output Only] Quotas assigned to this region. - Quotas []*Quota `json:"quotas,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Status: [Output Only] Status of the region, either UP or DOWN. - // - // Possible values: - // "DOWN" - // "UP" - Status string `json:"status,omitempty"` - - // Zones: [Output Only] A list of zones available in this region, in the - // form of resource URLs. - Zones []string `json:"zones,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Region) MarshalJSON() ([]byte, error) { - type noMethod Region - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RegionAutoscalerList: Contains a list of autoscalers. -type RegionAutoscalerList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: A list of autoscalers. - Items []*Autoscaler `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] A token used to continue a truncated - // list request. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionAutoscalerList) MarshalJSON() ([]byte, error) { - type noMethod RegionAutoscalerList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RegionInstanceGroupList: Contains a list of InstanceGroup resources. -type RegionInstanceGroupList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: A list of InstanceGroup resources. - Items []*InstanceGroup `json:"items,omitempty"` - - // Kind: The resource type. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] The URL for this resource type. The server - // generates this URL. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RegionInstanceGroupManagerList: Contains a list of managed instance -// groups. -type RegionInstanceGroupManagerList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: A list of managed instance groups. - Items []*InstanceGroupManager `json:"items,omitempty"` - - // Kind: [Output Only] The resource type, which is always - // compute#instanceGroupManagerList for a list of managed instance - // groups that exist in th regional scope. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output only] A token used to continue a truncated - // list request. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output only] The URL for this resource type. The server - // generates this URL. 
- SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagerList) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagerList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RegionInstanceGroupManagersAbandonInstancesRequest struct { - // Instances: The URLs of one or more instances to abandon. This can be - // a full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersAbandonInstancesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RegionInstanceGroupManagersDeleteInstancesRequest struct { - // Instances: The URLs of one or more instances to delete. This can be a - // full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersDeleteInstancesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RegionInstanceGroupManagersListInstancesResponse struct { - // ManagedInstances: List of managed instances. - ManagedInstances []*ManagedInstance `json:"managedInstances,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "ManagedInstances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ManagedInstances") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagersListInstancesResponse) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersListInstancesResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RegionInstanceGroupManagersRecreateRequest struct { - // Instances: The URLs of one or more instances to recreate. This can be - // a full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagersRecreateRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersRecreateRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RegionInstanceGroupManagersSetAutoHealingRequest struct { - AutoHealingPolicies []*InstanceGroupManagerAutoHealingPolicy `json:"autoHealingPolicies,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AutoHealingPolicies") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AutoHealingPolicies") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagersSetAutoHealingRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersSetAutoHealingRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RegionInstanceGroupManagersSetTargetPoolsRequest struct { - // Fingerprint: Fingerprint of the target pools information, which is a - // hash of the contents. This field is used for optimistic locking when - // you update the target pool entries. This field is optional. - Fingerprint string `json:"fingerprint,omitempty"` - - // TargetPools: The URL of all TargetPool resources to which instances - // in the instanceGroup field are added. The target pools automatically - // apply to all of the instances in the managed instance group. - TargetPools []string `json:"targetPools,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Fingerprint") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Fingerprint") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersSetTargetPoolsRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RegionInstanceGroupManagersSetTemplateRequest struct { - // InstanceTemplate: URL of the InstanceTemplate resource from which all - // new instances will be created. - InstanceTemplate string `json:"instanceTemplate,omitempty"` - - // ForceSendFields is a list of field names (e.g. "InstanceTemplate") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "InstanceTemplate") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagersSetTemplateRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersSetTemplateRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RegionInstanceGroupsListInstances struct { - // Id: [Output Only] Unique identifier for the resource. Defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of instances and any named ports that are assigned to - // those instances. - Items []*InstanceWithNamedPorts `json:"items,omitempty"` - - // Kind: The resource type. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. 
It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupsListInstances) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsListInstances - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RegionInstanceGroupsListInstancesRequest struct { - // InstanceState: Instances in which state should be returned. Valid - // options are: 'ALL', 'RUNNING'. By default, it lists all instances. - // - // Possible values: - // "ALL" - // "RUNNING" - InstanceState string `json:"instanceState,omitempty"` - - // PortName: Name of port user is interested in. It is optional. If it - // is set, only information about this ports will be returned. If it is - // not set, all the named ports will be returned. Always lists all - // instances. - PortName string `json:"portName,omitempty"` - - // ForceSendFields is a list of field names (e.g. "InstanceState") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "InstanceState") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsListInstancesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RegionInstanceGroupsSetNamedPortsRequest struct { - // Fingerprint: The fingerprint of the named ports information for this - // instance group. Use this optional property to prevent conflicts when - // multiple users change the named ports settings concurrently. Obtain - // the fingerprint with the instanceGroups.get method. Then, include the - // fingerprint in your request to ensure that you do not overwrite - // changes that were applied from another concurrent request. - Fingerprint string `json:"fingerprint,omitempty"` - - // NamedPorts: The list of named ports to set for this instance group. - NamedPorts []*NamedPort `json:"namedPorts,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Fingerprint") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Fingerprint") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. 
However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsSetNamedPortsRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RegionList: Contains a list of region resources. -type RegionList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of Region resources. - Items []*Region `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#regionList for - // lists of regions. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionList) MarshalJSON() ([]byte, error) { - type noMethod RegionList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ResourceCommitment: Commitment for a particular resource (a -// Commitment is composed of one or more of these). -type ResourceCommitment struct { - // Amount: The amount of the resource purchased (in a type-dependent - // unit, such as bytes). - Amount int64 `json:"amount,omitempty,string"` - - // Type: Type of resource for which this commitment applies. - // - // Possible values: - // "MEMORY" - // "UNSPECIFIED" - // "VCPU" - Type string `json:"type,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Amount") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Amount") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ResourceCommitment) MarshalJSON() ([]byte, error) { - type noMethod ResourceCommitment - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ResourceGroupReference struct { - // Group: A URI referencing one of the instance groups listed in the - // backend service. - Group string `json:"group,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Group") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Group") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ResourceGroupReference) MarshalJSON() ([]byte, error) { - type noMethod ResourceGroupReference - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Route: Represents a Route resource. A route specifies how certain -// packets should be handled by the network. Routes are associated with -// instances by tags and the set of routes for a particular instance is -// called its routing table. -// -// For each packet leaving a instance, the system searches that -// instance's routing table for a single best matching route. Routes -// match packets by destination IP address, preferring smaller or more -// specific ranges over larger ones. If there is a tie, the system -// selects the route with the smallest priority value. If there is still -// a tie, it uses the layer three and four packet headers to select just -// one of the remaining matching routes. The packet is then forwarded as -// specified by the nextHop field of the winning route - either to -// another instance destination, an instance gateway, or a Google -// Compute Engine-operated gateway. -// -// Packets that do not match any route in the sending instance's routing -// table are dropped. -type Route struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // DestRange: The destination range of outgoing packets that this route - // applies to. Only IPv4 is supported. 
- DestRange string `json:"destRange,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of this resource. Always compute#routes for - // Route resources. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Network: Fully-qualified URL of the network that this route applies - // to. - Network string `json:"network,omitempty"` - - // NextHopGateway: The URL to a gateway that should handle matching - // packets. You can only specify the internet gateway using a full or - // partial valid URL: - // projects//global/gateways/default-internet-gateway - NextHopGateway string `json:"nextHopGateway,omitempty"` - - // NextHopInstance: The URL to an instance that should handle matching - // packets. You can specify this as a full or partial URL. For - // example: - // https://www.googleapis.com/compute/v1/projects/project/zones/ - // zone/instances/ - NextHopInstance string `json:"nextHopInstance,omitempty"` - - // NextHopIp: The network IP address of an instance that should handle - // matching packets. Only IPv4 is supported. - NextHopIp string `json:"nextHopIp,omitempty"` - - // NextHopNetwork: The URL of the local network if it should handle - // matching packets. - NextHopNetwork string `json:"nextHopNetwork,omitempty"` - - // NextHopPeering: [Output Only] The network peering name that should - // handle matching packets, which should conform to RFC1035. - NextHopPeering string `json:"nextHopPeering,omitempty"` - - // NextHopVpnTunnel: The URL to a VpnTunnel that should handle matching - // packets. - NextHopVpnTunnel string `json:"nextHopVpnTunnel,omitempty"` - - // Priority: The priority of this route. Priority is used to break ties - // in cases where there is more than one matching route of equal prefix - // length. In the case of two routes with equal prefix length, the one - // with the lowest-numbered priority value wins. Default value is 1000. - // Valid range is 0 through 65535. - Priority int64 `json:"priority,omitempty"` - - // SelfLink: [Output Only] Server-defined fully-qualified URL for this - // resource. - SelfLink string `json:"selfLink,omitempty"` - - // Tags: A list of instance tags to which this route applies. - Tags []string `json:"tags,omitempty"` - - // Warnings: [Output Only] If potential misconfigurations are detected - // for this route, this field will be populated with warning messages. - Warnings []*RouteWarnings `json:"warnings,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Route) MarshalJSON() ([]byte, error) { - type noMethod Route - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RouteWarnings struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RouteWarningsData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RouteWarnings) MarshalJSON() ([]byte, error) { - type noMethod RouteWarnings - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RouteWarningsData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). 
- Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RouteWarningsData) MarshalJSON() ([]byte, error) { - type noMethod RouteWarningsData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RouteList: Contains a list of Route resources. -type RouteList struct { - // Id: [Output Only] Unique identifier for the resource. Defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of Route resources. - Items []*Route `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RouteList) MarshalJSON() ([]byte, error) { - type noMethod RouteList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Router: Router resource. -type Router struct { - // Bgp: BGP information specific to this router. 
- Bgp *RouterBgp `json:"bgp,omitempty"` - - // BgpPeers: BGP information that needs to be configured into the - // routing stack to establish the BGP peering. It must specify peer ASN - // and either interface name, IP, or peer IP. Please refer to RFC4273. - BgpPeers []*RouterBgpPeer `json:"bgpPeers,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Interfaces: Router interfaces. Each interface requires either one - // linked resource (e.g. linkedVpnTunnel), or IP address and IP address - // range (e.g. ipRange), or both. - Interfaces []*RouterInterface `json:"interfaces,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#router for - // routers. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Network: URI of the network to which this router belongs. - Network string `json:"network,omitempty"` - - // Region: [Output Only] URI of the region where the router resides. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Bgp") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Bgp") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Router) MarshalJSON() ([]byte, error) { - type noMethod Router - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RouterAggregatedList: Contains a list of routers. -type RouterAggregatedList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: A map of scoped router lists. 
- Items map[string]RoutersScopedList `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RouterAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod RouterAggregatedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RouterBgp struct { - // Asn: Local BGP Autonomous System Number (ASN). Must be an RFC6996 - // private ASN, either 16-bit or 32-bit. The value will be fixed for - // this router resource. All VPN tunnels that link to this router will - // have the same local ASN. - Asn int64 `json:"asn,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Asn") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Asn") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RouterBgp) MarshalJSON() ([]byte, error) { - type noMethod RouterBgp - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RouterBgpPeer struct { - // AdvertisedRoutePriority: The priority of routes advertised to this - // BGP peer. In the case where there is more than one matching route of - // maximum length, the routes with lowest priority value win. 
- AdvertisedRoutePriority int64 `json:"advertisedRoutePriority,omitempty"` - - // InterfaceName: Name of the interface the BGP peer is associated with. - InterfaceName string `json:"interfaceName,omitempty"` - - // IpAddress: IP address of the interface inside Google Cloud Platform. - // Only IPv4 is supported. - IpAddress string `json:"ipAddress,omitempty"` - - // Name: Name of this BGP peer. The name must be 1-63 characters long - // and comply with RFC1035. - Name string `json:"name,omitempty"` - - // PeerAsn: Peer BGP Autonomous System Number (ASN). For VPN use case, - // this value can be different for every tunnel. - PeerAsn int64 `json:"peerAsn,omitempty"` - - // PeerIpAddress: IP address of the BGP interface outside Google cloud. - // Only IPv4 is supported. - PeerIpAddress string `json:"peerIpAddress,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "AdvertisedRoutePriority") to unconditionally include in API - // requests. By default, fields with empty values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AdvertisedRoutePriority") - // to include in API requests with the JSON null value. By default, - // fields with empty values are omitted from API requests. However, any - // field with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *RouterBgpPeer) MarshalJSON() ([]byte, error) { - type noMethod RouterBgpPeer - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RouterInterface struct { - // IpRange: IP address and range of the interface. The IP range must be - // in the RFC3927 link-local IP space. The value must be a - // CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not - // truncate the address as it represents the IP address of the - // interface. - IpRange string `json:"ipRange,omitempty"` - - // LinkedVpnTunnel: URI of the linked VPN tunnel. It must be in the same - // region as the router. Each interface can have at most one linked - // resource and it could either be a VPN Tunnel or an interconnect - // attachment. - LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` - - // Name: Name of this interface entry. The name must be 1-63 characters - // long and comply with RFC1035. - Name string `json:"name,omitempty"` - - // ForceSendFields is a list of field names (e.g. "IpRange") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "IpRange") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. 
It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RouterInterface) MarshalJSON() ([]byte, error) { - type noMethod RouterInterface - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RouterList: Contains a list of Router resources. -type RouterList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: A list of Router resources. - Items []*Router `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#router for - // routers. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RouterList) MarshalJSON() ([]byte, error) { - type noMethod RouterList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RouterStatus struct { - // BestRoutes: Best routes for this router's network. - BestRoutes []*Route `json:"bestRoutes,omitempty"` - - // BestRoutesForRouter: Best routes learned by this router. - BestRoutesForRouter []*Route `json:"bestRoutesForRouter,omitempty"` - - BgpPeerStatus []*RouterStatusBgpPeerStatus `json:"bgpPeerStatus,omitempty"` - - // Network: URI of the network to which this router belongs. - Network string `json:"network,omitempty"` - - // ForceSendFields is a list of field names (e.g. "BestRoutes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"BestRoutes") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RouterStatus) MarshalJSON() ([]byte, error) { - type noMethod RouterStatus - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RouterStatusBgpPeerStatus struct { - // AdvertisedRoutes: Routes that were advertised to the remote BGP peer - AdvertisedRoutes []*Route `json:"advertisedRoutes,omitempty"` - - // IpAddress: IP address of the local BGP interface. - IpAddress string `json:"ipAddress,omitempty"` - - // LinkedVpnTunnel: URL of the VPN tunnel that this BGP peer controls. - LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` - - // Name: Name of this BGP peer. Unique within the Routers resource. - Name string `json:"name,omitempty"` - - // NumLearnedRoutes: Number of routes learned from the remote BGP Peer. - NumLearnedRoutes int64 `json:"numLearnedRoutes,omitempty"` - - // PeerIpAddress: IP address of the remote BGP interface. - PeerIpAddress string `json:"peerIpAddress,omitempty"` - - // State: BGP state as specified in RFC1771. - State string `json:"state,omitempty"` - - // Status: Status of the BGP peer: {UP, DOWN} - // - // Possible values: - // "DOWN" - // "UNKNOWN" - // "UP" - Status string `json:"status,omitempty"` - - // Uptime: Time this session has been up. Format: 14 years, 51 weeks, 6 - // days, 23 hours, 59 minutes, 59 seconds - Uptime string `json:"uptime,omitempty"` - - // UptimeSeconds: Time this session has been up, in seconds. Format: 145 - UptimeSeconds string `json:"uptimeSeconds,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AdvertisedRoutes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AdvertisedRoutes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *RouterStatusBgpPeerStatus) MarshalJSON() ([]byte, error) { - type noMethod RouterStatusBgpPeerStatus - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RouterStatusResponse struct { - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - Result *RouterStatus `json:"result,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RouterStatusResponse) MarshalJSON() ([]byte, error) { - type noMethod RouterStatusResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RoutersPreviewResponse struct { - // Resource: Preview of given router. - Resource *Router `json:"resource,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Resource") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Resource") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RoutersPreviewResponse) MarshalJSON() ([]byte, error) { - type noMethod RoutersPreviewResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RoutersScopedList struct { - // Routers: List of routers contained in this scope. - Routers []*Router `json:"routers,omitempty"` - - // Warning: Informational warning which replaces the list of routers - // when the list is empty. - Warning *RoutersScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Routers") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Routers") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *RoutersScopedList) MarshalJSON() ([]byte, error) { - type noMethod RoutersScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RoutersScopedListWarning: Informational warning which replaces the -// list of routers when the list is empty. -type RoutersScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RoutersScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RoutersScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod RoutersScopedListWarning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RoutersScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RoutersScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod RoutersScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Rule: A rule to be applied in a Policy. -type Rule struct { - // Action: Required - // - // Possible values: - // "ALLOW" - // "ALLOW_WITH_LOG" - // "DENY" - // "DENY_WITH_LOG" - // "LOG" - // "NO_ACTION" - Action string `json:"action,omitempty"` - - // Conditions: Additional restrictions that must be met - Conditions []*Condition `json:"conditions,omitempty"` - - // Description: Human-readable description of the rule. - Description string `json:"description,omitempty"` - - // Ins: If one or more 'in' clauses are specified, the rule matches if - // the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries. - Ins []string `json:"ins,omitempty"` - - // LogConfigs: The config returned to callers of - // tech.iam.IAM.CheckPolicy for any entries that match the LOG action. - LogConfigs []*LogConfig `json:"logConfigs,omitempty"` - - // NotIns: If one or more 'not_in' clauses are specified, the rule - // matches if the PRINCIPAL/AUTHORITY_SELECTOR is in none of the - // entries. - NotIns []string `json:"notIns,omitempty"` - - // Permissions: A permission is a string of form '..' (e.g., - // 'storage.buckets.list'). A value of '*' matches all permissions, and - // a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs. - Permissions []string `json:"permissions,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Action") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Action") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Rule) MarshalJSON() ([]byte, error) { - type noMethod Rule - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SSLHealthCheck struct { - // Port: The TCP port number for the health check request. The default - // value is 443. Valid values are 1 through 65535. 
- Port int64 `json:"port,omitempty"` - - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. - PortName string `json:"portName,omitempty"` - - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // Request: The application data to send once the SSL connection has - // been established (default value is empty). If both request and - // response are empty, the connection establishment alone will indicate - // health. The request data can only be ASCII. - Request string `json:"request,omitempty"` - - // Response: The bytes to match against the beginning of the response - // data. If left empty (the default value), any response will indicate - // health. The response data can only be ASCII. - Response string `json:"response,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Port") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Port") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SSLHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod SSLHealthCheck - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Scheduling: Sets the scheduling options for an Instance. -type Scheduling struct { - // AutomaticRestart: Specifies whether the instance should be - // automatically restarted if it is terminated by Compute Engine (not - // terminated by a user). You can only set the automatic restart option - // for standard instances. Preemptible instances cannot be automatically - // restarted. - // - // By default, this is set to true so an instance is automatically - // restarted if it is terminated by Compute Engine. - AutomaticRestart bool `json:"automaticRestart,omitempty"` - - // OnHostMaintenance: Defines the maintenance behavior for this - // instance. For standard instances, the default behavior is MIGRATE. - // For preemptible instances, the default and only possible behavior is - // TERMINATE. For more information, see Setting Instance Scheduling - // Options. - // - // Possible values: - // "MIGRATE" - // "TERMINATE" - OnHostMaintenance string `json:"onHostMaintenance,omitempty"` - - // Preemptible: Defines whether the instance is preemptible. This can - // only be set during instance creation, it cannot be set or changed - // after the instance has been created. - Preemptible bool `json:"preemptible,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AutomaticRestart") to - // unconditionally include in API requests. 
By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AutomaticRestart") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Scheduling) MarshalJSON() ([]byte, error) { - type noMethod Scheduling - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SerialPortOutput: An instance's serial console output. -type SerialPortOutput struct { - // Contents: [Output Only] The contents of the console output. - Contents string `json:"contents,omitempty"` - - // Kind: [Output Only] Type of the resource. Always - // compute#serialPortOutput for serial port output. - Kind string `json:"kind,omitempty"` - - // Next: [Output Only] The position of the next byte of content from the - // serial console output. Use this value in the next request as the - // start parameter. - Next int64 `json:"next,omitempty,string"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Start: The starting byte position of the output that was returned. - // This should match the start parameter sent with the request. If the - // serial console output exceeds the size of the buffer, older output - // will be overwritten by newer content and the start values will be - // mismatched. - Start int64 `json:"start,omitempty,string"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Contents") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Contents") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SerialPortOutput) MarshalJSON() ([]byte, error) { - type noMethod SerialPortOutput - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ServiceAccount: A service account. -type ServiceAccount struct { - // Email: Email address of the service account. - Email string `json:"email,omitempty"` - - // Scopes: The list of scopes to be made available for this service - // account. 
- Scopes []string `json:"scopes,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Email") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Email") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ServiceAccount) MarshalJSON() ([]byte, error) { - type noMethod ServiceAccount - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Snapshot: A persistent disk snapshot resource. -type Snapshot struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // DiskSizeGb: [Output Only] Size of the snapshot, specified in GB. - DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#snapshot for - // Snapshot resources. - Kind string `json:"kind,omitempty"` - - // LabelFingerprint: A fingerprint for the labels being applied to this - // snapshot, which is essentially a hash of the labels set used for - // optimistic locking. The fingerprint is initially generated by Compute - // Engine and changes after every request to modify or update labels. - // You must always provide an up-to-date fingerprint hash in order to - // update or change labels. - // - // To see the latest fingerprint, make a get() request to retrieve a - // snapshot. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: Labels to apply to this snapshot. These can be later modified - // by the setLabels method. Label values may be empty. - Labels map[string]string `json:"labels,omitempty"` - - // Licenses: [Output Only] A list of public visible licenses that apply - // to this snapshot. This can be because the original image had licenses - // attached (such as a Windows image). - Licenses []string `json:"licenses,omitempty"` - - // Name: Name of the resource; provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. 
- SelfLink string `json:"selfLink,omitempty"` - - // SnapshotEncryptionKey: Encrypts the snapshot using a - // customer-supplied encryption key. - // - // After you encrypt a snapshot using a customer-supplied key, you must - // provide the same key if you use the image later For example, you must - // provide the encryption key when you create a disk from the encrypted - // snapshot in a future request. - // - // Customer-supplied encryption keys do not protect access to metadata - // of the disk. - // - // If you do not provide an encryption key when creating the snapshot, - // then the snapshot will be encrypted using an automatically generated - // key and you do not need to provide a key to use the snapshot later. - SnapshotEncryptionKey *CustomerEncryptionKey `json:"snapshotEncryptionKey,omitempty"` - - // SourceDisk: [Output Only] The source disk used to create this - // snapshot. - SourceDisk string `json:"sourceDisk,omitempty"` - - // SourceDiskEncryptionKey: The customer-supplied encryption key of the - // source disk. Required if the source disk is protected by a - // customer-supplied encryption key. - SourceDiskEncryptionKey *CustomerEncryptionKey `json:"sourceDiskEncryptionKey,omitempty"` - - // SourceDiskId: [Output Only] The ID value of the disk used to create - // this snapshot. This value may be used to determine whether the - // snapshot was taken from the current or a previous instance of a given - // disk name. - SourceDiskId string `json:"sourceDiskId,omitempty"` - - // Status: [Output Only] The status of the snapshot. This can be - // CREATING, DELETING, FAILED, READY, or UPLOADING. - // - // Possible values: - // "CREATING" - // "DELETING" - // "FAILED" - // "READY" - // "UPLOADING" - Status string `json:"status,omitempty"` - - // StorageBytes: [Output Only] A size of the the storage used by the - // snapshot. As snapshots share storage, this number is expected to - // change with snapshot creation/deletion. - StorageBytes int64 `json:"storageBytes,omitempty,string"` - - // StorageBytesStatus: [Output Only] An indicator whether storageBytes - // is in a stable state or it is being adjusted as a result of shared - // storage reallocation. This status can either be UPDATING, meaning the - // size of the snapshot is being updated, or UP_TO_DATE, meaning the - // size of the snapshot is up-to-date. - // - // Possible values: - // "UPDATING" - // "UP_TO_DATE" - StorageBytesStatus string `json:"storageBytesStatus,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. 
- NullFields []string `json:"-"` -} - -func (s *Snapshot) MarshalJSON() ([]byte, error) { - type noMethod Snapshot - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SnapshotList: Contains a list of Snapshot resources. -type SnapshotList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of Snapshot resources. - Items []*Snapshot `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SnapshotList) MarshalJSON() ([]byte, error) { - type noMethod SnapshotList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SslCertificate: An SslCertificate resource. This resource provides a -// mechanism to upload an SSL key and certificate to the load balancer -// to serve secure connections from the user. -type SslCertificate struct { - // Certificate: A local certificate file. The certificate must be in PEM - // format. The certificate chain must be no greater than 5 certs long. - // The chain must include at least one intermediate cert. - Certificate string `json:"certificate,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always - // compute#sslCertificate for SSL certificates. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. 
Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // PrivateKey: A write-only private key in PEM format. Only insert - // requests will include this field. - PrivateKey string `json:"privateKey,omitempty"` - - // SelfLink: [Output only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Certificate") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Certificate") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SslCertificate) MarshalJSON() ([]byte, error) { - type noMethod SslCertificate - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SslCertificateList: Contains a list of SslCertificate resources. -type SslCertificateList struct { - // Id: [Output Only] Unique identifier for the resource. Defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of SslCertificate resources. - Items []*SslCertificate `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SslCertificateList) MarshalJSON() ([]byte, error) { - type noMethod SslCertificateList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Subnetwork: A Subnetwork resource. -type Subnetwork struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // GatewayAddress: [Output Only] The gateway address for default routes - // to reach destination addresses outside this subnetwork. - GatewayAddress string `json:"gatewayAddress,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // IpCidrRange: The range of internal addresses that are owned by this - // subnetwork. Provide this property when you create the subnetwork. For - // example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and - // non-overlapping within a network. Only IPv4 is supported. - IpCidrRange string `json:"ipCidrRange,omitempty"` - - // Kind: [Output Only] Type of the resource. Always compute#subnetwork - // for Subnetwork resources. - Kind string `json:"kind,omitempty"` - - // Name: The name of the resource, provided by the client when initially - // creating the resource. The name must be 1-63 characters long, and - // comply with RFC1035. Specifically, the name must be 1-63 characters - // long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? - // which means the first character must be a lowercase letter, and all - // following characters must be a dash, lowercase letter, or digit, - // except the last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Network: The URL of the network to which this subnetwork belongs, - // provided by the client when initially creating the subnetwork. Only - // networks that are in the distributed mode can have subnetworks. - Network string `json:"network,omitempty"` - - // PrivateIpGoogleAccess: Whether the VMs in this subnet can access - // Google services without assigned external IP addresses. - PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` - - // Region: URL of the region where the Subnetwork resides. - Region string `json:"region,omitempty"` - - // SecondaryIpRanges: An array of configurations for secondary IP ranges - // for VM instances contained in this subnetwork. The primary IP of such - // VM must belong to the primary ipCidrRange of the subnetwork. The - // alias IPs may belong to either primary or secondary ranges. - SecondaryIpRanges []*SubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. 
- googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Subnetwork) MarshalJSON() ([]byte, error) { - type noMethod Subnetwork - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SubnetworkAggregatedList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output] A map of scoped Subnetwork lists. - Items map[string]SubnetworksScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#subnetworkAggregatedList for aggregated lists of subnetworks. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkAggregatedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SubnetworkList: Contains a list of Subnetwork resources. 
-type SubnetworkList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: The Subnetwork resources. - Items []*Subnetwork `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#subnetworkList - // for lists of subnetworks. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SubnetworkList) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SubnetworkSecondaryRange: Represents a secondary IP range of a -// subnetwork. -type SubnetworkSecondaryRange struct { - // IpCidrRange: The range of IP addresses belonging to this subnetwork - // secondary range. Provide this property when you create the - // subnetwork. Ranges must be unique and non-overlapping with all - // primary and secondary IP ranges within a network. Only IPv4 is - // supported. - IpCidrRange string `json:"ipCidrRange,omitempty"` - - // RangeName: The name associated with this subnetwork secondary range, - // used when adding an alias IP range to a VM instance. The name must be - // 1-63 characters long, and comply with RFC1035. The name must be - // unique within the subnetwork. - RangeName string `json:"rangeName,omitempty"` - - // ForceSendFields is a list of field names (e.g. "IpCidrRange") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "IpCidrRange") to include - // in API requests with the JSON null value. 
By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkSecondaryRange - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SubnetworksExpandIpCidrRangeRequest struct { - // IpCidrRange: The IP (in CIDR format or netmask) of internal addresses - // that are legal on this Subnetwork. This range should be disjoint from - // other subnetworks within this network. This range can only be larger - // than (i.e. a superset of) the range previously defined before the - // update. - IpCidrRange string `json:"ipCidrRange,omitempty"` - - // ForceSendFields is a list of field names (e.g. "IpCidrRange") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "IpCidrRange") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SubnetworksExpandIpCidrRangeRequest) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksExpandIpCidrRangeRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SubnetworksScopedList struct { - // Subnetworks: List of subnetworks contained in this scope. - Subnetworks []*Subnetwork `json:"subnetworks,omitempty"` - - // Warning: An informational warning that appears when the list of - // addresses is empty. - Warning *SubnetworksScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Subnetworks") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Subnetworks") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
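A hedged illustration of the ExpandIpCidrRange request body described in the removed comments above: the request carries only the new range, which must be a superset of the subnetwork's current range. Assumes the same vendored compute/v1 package; the CIDR is illustrative.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	req := &compute.SubnetworksExpandIpCidrRangeRequest{
		// Widening, e.g., an existing /24 to a /20.
		IpCidrRange: "10.0.0.0/20",
	}
	b, err := req.MarshalJSON()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(b)) // {"ipCidrRange":"10.0.0.0/20"}
}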
- NullFields []string `json:"-"` -} - -func (s *SubnetworksScopedList) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SubnetworksScopedListWarning: An informational warning that appears -// when the list of addresses is empty. -type SubnetworksScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*SubnetworksScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SubnetworksScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksScopedListWarning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SubnetworksScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SubnetworksScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SubnetworksSetPrivateIpGoogleAccessRequest struct { - PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "PrivateIpGoogleAccess") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "PrivateIpGoogleAccess") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *SubnetworksSetPrivateIpGoogleAccessRequest) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksSetPrivateIpGoogleAccessRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TCPHealthCheck struct { - // Port: The TCP port number for the health check request. The default - // value is 80. Valid values are 1 through 65535. - Port int64 `json:"port,omitempty"` - - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. - PortName string `json:"portName,omitempty"` - - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // Request: The application data to send once the TCP connection has - // been established (default value is empty). If both request and - // response are empty, the connection establishment alone will indicate - // health. The request data can only be ASCII. - Request string `json:"request,omitempty"` - - // Response: The bytes to match against the beginning of the response - // data. If left empty (the default value), any response will indicate - // health. The response data can only be ASCII. - Response string `json:"response,omitempty"` - - // ForceSendFields is a list of field names (e.g. 
"Port") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Port") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TCPHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod TCPHealthCheck - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Tags: A set of instance tags. -type Tags struct { - // Fingerprint: Specifies a fingerprint for this request, which is - // essentially a hash of the metadata's contents and used for optimistic - // locking. The fingerprint is initially generated by Compute Engine and - // changes after every request to modify or update metadata. You must - // always provide an up-to-date fingerprint hash in order to update or - // change metadata. - // - // To see the latest fingerprint, make get() request to the instance. - Fingerprint string `json:"fingerprint,omitempty"` - - // Items: An array of tags. Each tag must be 1-63 characters long, and - // comply with RFC1035. - Items []string `json:"items,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Fingerprint") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Fingerprint") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Tags) MarshalJSON() ([]byte, error) { - type noMethod Tags - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetHttpProxy: A TargetHttpProxy resource. This resource defines an -// HTTP proxy. -type TargetHttpProxy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of resource. Always compute#targetHttpProxy - // for target HTTP proxies. 
- Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // UrlMap: URL to the UrlMap resource that defines the mapping from URL - // to the BackendService. - UrlMap string `json:"urlMap,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetHttpProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpProxy - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetHttpProxyList: A list of TargetHttpProxy resources. -type TargetHttpProxyList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetHttpProxy resources. - Items []*TargetHttpProxy `json:"items,omitempty"` - - // Kind: Type of resource. Always compute#targetHttpProxyList for lists - // of target HTTP proxies. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetHttpProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpProxyList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetHttpsProxiesSetSslCertificatesRequest struct { - // SslCertificates: New set of SslCertificate resources to associate - // with this TargetHttpsProxy resource. Currently exactly one - // SslCertificate resource must be specified. - SslCertificates []string `json:"sslCertificates,omitempty"` - - // ForceSendFields is a list of field names (e.g. "SslCertificates") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "SslCertificates") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxiesSetSslCertificatesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetHttpsProxy: A TargetHttpsProxy resource. This resource defines -// an HTTPS proxy. -type TargetHttpsProxy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of resource. Always compute#targetHttpsProxy - // for target HTTPS proxies. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // SslCertificates: URLs to SslCertificate resources that are used to - // authenticate connections between users and the load balancer. - // Currently, exactly one SSL certificate must be specified. - SslCertificates []string `json:"sslCertificates,omitempty"` - - // UrlMap: A fully-qualified or valid partial URL to the UrlMap resource - // that defines the mapping from URL to the BackendService. For example, - // the following are all valid URLs for specifying a URL map: - // - - // https://www.googleapis.compute/v1/projects/project/global/urlMaps/url-map - // - projects/project/global/urlMaps/url-map - // - global/urlMaps/url-map - UrlMap string `json:"urlMap,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetHttpsProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxy - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetHttpsProxyList: Contains a list of TargetHttpsProxy resources. -type TargetHttpsProxyList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetHttpsProxy resources. - Items []*TargetHttpsProxy `json:"items,omitempty"` - - // Kind: Type of resource. Always compute#targetHttpsProxyList for lists - // of target HTTPS proxies. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. 
"Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetHttpsProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxyList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetInstance: A TargetInstance resource. This resource defines an -// endpoint instance that terminates traffic of certain protocols. -type TargetInstance struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Instance: A URL to the virtual machine instance that handles traffic - // for this target instance. When creating a target instance, you can - // provide the fully-qualified URL or a valid partial URL to the desired - // virtual machine. For example, the following are all valid URLs: - // - - // https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance - // - projects/project/zones/zone/instances/instance - // - zones/zone/instances/instance - Instance string `json:"instance,omitempty"` - - // Kind: [Output Only] The type of the resource. Always - // compute#targetInstance for target instances. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // NatPolicy: NAT option controlling how IPs are NAT'ed to the instance. - // Currently only NO_NAT (default value) is supported. - // - // Possible values: - // "NO_NAT" - NatPolicy string `json:"natPolicy,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Zone: [Output Only] URL of the zone where the target instance - // resides. - Zone string `json:"zone,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. 
"CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetInstance) MarshalJSON() ([]byte, error) { - type noMethod TargetInstance - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetInstanceAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A map of scoped target instance lists. - Items map[string]TargetInstancesScopedList `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceAggregatedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetInstanceList: Contains a list of TargetInstance resources. -type TargetInstanceList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetInstance resources. 
- Items []*TargetInstance `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetInstanceList) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetInstancesScopedList struct { - // TargetInstances: List of target instances contained in this scope. - TargetInstances []*TargetInstance `json:"targetInstances,omitempty"` - - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. - Warning *TargetInstancesScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "TargetInstances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "TargetInstances") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetInstancesScopedList) MarshalJSON() ([]byte, error) { - type noMethod TargetInstancesScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetInstancesScopedListWarning: Informational warning which -// replaces the list of addresses when the list is empty. 
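A hedged sketch of the "informational warning" behaviour documented above: when a scope has no target instances, the scoped list carries a Warning (for example NO_RESULTS_ON_PAGE) instead of items. The sample payload is illustrative, not taken from this patch.

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	raw := []byte(`{"warning":{"code":"NO_RESULTS_ON_PAGE",
	 "message":"There are no results for scope 'zones/us-east1-d' on this page.",
	 "data":[{"key":"scope","value":"zones/us-east1-d"}]}}`)

	var scoped compute.TargetInstancesScopedList
	if err := json.Unmarshal(raw, &scoped); err != nil {
		fmt.Println(err)
		return
	}
	if len(scoped.TargetInstances) == 0 && scoped.Warning != nil {
		fmt.Println(scoped.Warning.Code, scoped.Warning.Message)
	}
}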
-type TargetInstancesScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetInstancesScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetInstancesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetInstancesScopedListWarning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetInstancesScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetInstancesScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetPool: A TargetPool resource. This resource defines a pool of -// instances, an associated HttpHealthCheck resource, and the fallback -// target pool. -type TargetPool struct { - // BackupPool: This field is applicable only when the containing target - // pool is serving a forwarding rule as the primary pool, and its - // failoverRatio field is properly set to a value between [0, - // 1]. - // - // backupPool and failoverRatio together define the fallback behavior of - // the primary target pool: if the ratio of the healthy instances in the - // primary pool is at or below failoverRatio, traffic arriving at the - // load-balanced IP will be directed to the backup pool. - // - // In case where failoverRatio and backupPool are not set, or all the - // instances in the backup pool are unhealthy, the traffic will be - // directed back to the primary pool in the "force" mode, where traffic - // will be spread to the healthy instances with the best effort, or to - // all instances when no instance is healthy. - BackupPool string `json:"backupPool,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // FailoverRatio: This field is applicable only when the containing - // target pool is serving a forwarding rule as the primary pool (i.e., - // not as a backup pool to some other target pool). The value of the - // field must be in [0, 1]. - // - // If set, backupPool must also be set. They together define the - // fallback behavior of the primary target pool: if the ratio of the - // healthy instances in the primary pool is at or below this number, - // traffic arriving at the load-balanced IP will be directed to the - // backup pool. - // - // In case where failoverRatio is not set or all the instances in the - // backup pool are unhealthy, the traffic will be directed back to the - // primary pool in the "force" mode, where traffic will be spread to the - // healthy instances with the best effort, or to all instances when no - // instance is healthy. - FailoverRatio float64 `json:"failoverRatio,omitempty"` - - // HealthChecks: The URL of the HttpHealthCheck resource. A member - // instance in this pool is considered healthy if and only if the health - // checks pass. An empty list means all member instances will be - // considered healthy at all times. Only HttpHealthChecks are supported. - // Only one health check may be specified. - HealthChecks []string `json:"healthChecks,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. 
- Id uint64 `json:"id,omitempty,string"` - - // Instances: A list of resource URLs to the virtual machine instances - // serving this pool. They must live in zones contained in the same - // region as this pool. - Instances []string `json:"instances,omitempty"` - - // Kind: [Output Only] Type of the resource. Always compute#targetPool - // for target pools. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Region: [Output Only] URL of the region where the target pool - // resides. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // SessionAffinity: Sesssion affinity option, must be one of the - // following values: - // NONE: Connections from the same client IP may go to any instance in - // the pool. - // CLIENT_IP: Connections from the same client IP will go to the same - // instance in the pool while that instance remains - // healthy. - // CLIENT_IP_PROTO: Connections from the same client IP with the same IP - // protocol will go to the same instance in the pool while that instance - // remains healthy. - // - // Possible values: - // "CLIENT_IP" - // "CLIENT_IP_PORT_PROTO" - // "CLIENT_IP_PROTO" - // "GENERATED_COOKIE" - // "NONE" - SessionAffinity string `json:"sessionAffinity,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "BackupPool") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "BackupPool") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetPool) MarshalJSON() ([]byte, error) { - type noMethod TargetPool - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *TargetPool) UnmarshalJSON(data []byte) error { - type noMethod TargetPool - var s1 struct { - FailoverRatio gensupport.JSONFloat64 `json:"failoverRatio"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.FailoverRatio = float64(s1.FailoverRatio) - return nil -} - -type TargetPoolAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource. Defined by the - // server. 
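A hedged sketch of the TargetPool failover behaviour documented above: when the healthy fraction of the primary pool drops to or below FailoverRatio, traffic is directed to BackupPool. Assumes the vendored compute/v1 package; the resource URLs are illustrative placeholders.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	tp := &compute.TargetPool{
		Name:            "primary-pool",
		BackupPool:      "regions/us-central1/targetPools/backup-pool", // fallback pool
		FailoverRatio:   0.5,                                           // must be in [0, 1]
		SessionAffinity: "CLIENT_IP",                                   // same client IP -> same instance
		HealthChecks:    []string{"global/httpHealthChecks/example-check"},
	}
	b, err := tp.MarshalJSON()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(b))
}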
- Id string `json:"id,omitempty"` - - // Items: [Output Only] A map of scoped target pool lists. - Items map[string]TargetPoolsScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#targetPoolAggregatedList for aggregated lists of target - // pools. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolAggregatedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetPoolInstanceHealth struct { - HealthStatus []*HealthStatus `json:"healthStatus,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#targetPoolInstanceHealth when checking the health of an - // instance. - Kind string `json:"kind,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "HealthStatus") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "HealthStatus") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *TargetPoolInstanceHealth) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolInstanceHealth - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetPoolList: Contains a list of TargetPool resources. -type TargetPoolList struct { - // Id: [Output Only] Unique identifier for the resource. Defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetPool resources. - Items []*TargetPool `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#targetPoolList - // for lists of target pools. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetPoolList) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetPoolsAddHealthCheckRequest struct { - // HealthChecks: The HttpHealthCheck to add to the target pool. - HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` - - // ForceSendFields is a list of field names (e.g. "HealthChecks") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "HealthChecks") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
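A hedged sketch of the NextPageToken contract documented for the list types above: keep passing the returned token back as pageToken until the server returns an empty token. The fetchPage parameter is a hypothetical stand-in for the generated list call and is not part of the vendored package.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// allTargetPools accumulates every page of a paginated TargetPool listing.
func allTargetPools(fetchPage func(pageToken string) (*compute.TargetPoolList, error)) ([]*compute.TargetPool, error) {
	var pools []*compute.TargetPool
	token := ""
	for {
		page, err := fetchPage(token)
		if err != nil {
			return nil, err
		}
		pools = append(pools, page.Items...)
		if page.NextPageToken == "" {
			return pools, nil
		}
		token = page.NextPageToken
	}
}

func main() {
	// Fake single-page fetcher, just to exercise the loop.
	fake := func(pageToken string) (*compute.TargetPoolList, error) {
		return &compute.TargetPoolList{Items: []*compute.TargetPool{{Name: "pool-a"}}}, nil
	}
	pools, err := allTargetPools(fake)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(len(pools)) // 1
}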
- NullFields []string `json:"-"` -} - -func (s *TargetPoolsAddHealthCheckRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsAddHealthCheckRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetPoolsAddInstanceRequest struct { - // Instances: A full or partial URL to an instance to add to this target - // pool. This can be a full or partial URL. For example, the following - // are valid URLs: - // - - // https://www.googleapis.com/compute/v1/projects/project-id/zones/zone/instances/instance-name - // - projects/project-id/zones/zone/instances/instance-name - // - zones/zone/instances/instance-name - Instances []*InstanceReference `json:"instances,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetPoolsAddInstanceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsAddInstanceRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetPoolsRemoveHealthCheckRequest struct { - // HealthChecks: Health check URL to be removed. This can be a full or - // valid partial URL. For example, the following are valid URLs: - // - - // https://www.googleapis.com/compute/beta/projects/project/global/httpHealthChecks/health-check - // - projects/project/global/httpHealthChecks/health-check - // - global/httpHealthChecks/health-check - HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` - - // ForceSendFields is a list of field names (e.g. "HealthChecks") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "HealthChecks") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
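A hedged sketch of the add-instance request body described above; the instance URL may be fully-qualified or one of the partial forms listed. Assumes the vendored compute/v1 package, including its InstanceReference type (not shown in this hunk); the instance path is illustrative.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	req := &compute.TargetPoolsAddInstanceRequest{
		Instances: []*compute.InstanceReference{
			{Instance: "zones/us-central1-a/instances/web-1"}, // partial URL form
		},
	}
	b, err := req.MarshalJSON()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(b))
}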
- NullFields []string `json:"-"` -} - -func (s *TargetPoolsRemoveHealthCheckRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsRemoveHealthCheckRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetPoolsRemoveInstanceRequest struct { - // Instances: URLs of the instances to be removed from target pool. - Instances []*InstanceReference `json:"instances,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetPoolsRemoveInstanceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsRemoveInstanceRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetPoolsScopedList struct { - // TargetPools: List of target pools contained in this scope. - TargetPools []*TargetPool `json:"targetPools,omitempty"` - - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. - Warning *TargetPoolsScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "TargetPools") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "TargetPools") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetPoolsScopedList) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetPoolsScopedListWarning: Informational warning which replaces -// the list of addresses when the list is empty. -type TargetPoolsScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. 
- // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetPoolsScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetPoolsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsScopedListWarning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetPoolsScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. 
It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetPoolsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetReference struct { - Target string `json:"target,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Target") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Target") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetReference) MarshalJSON() ([]byte, error) { - type noMethod TargetReference - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetSslProxiesSetBackendServiceRequest struct { - // Service: The URL of the new BackendService resource for the - // targetSslProxy. - Service string `json:"service,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Service") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Service") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxiesSetBackendServiceRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetSslProxiesSetProxyHeaderRequest struct { - // ProxyHeader: The new type of proxy header to append before sending - // data to the backend. NONE or PROXY_V1 are allowed. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ProxyHeader") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ProxyHeader") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetSslProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxiesSetProxyHeaderRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetSslProxiesSetSslCertificatesRequest struct { - // SslCertificates: New set of URLs to SslCertificate resources to - // associate with this TargetSslProxy. Currently exactly one ssl - // certificate must be specified. - SslCertificates []string `json:"sslCertificates,omitempty"` - - // ForceSendFields is a list of field names (e.g. "SslCertificates") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "SslCertificates") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxiesSetSslCertificatesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetSslProxy: A TargetSslProxy resource. This resource defines an -// SSL proxy. -type TargetSslProxy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always - // compute#targetSslProxy for target SSL proxies. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. 
- Name string `json:"name,omitempty"` - - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Service: URL to the BackendService resource. - Service string `json:"service,omitempty"` - - // SslCertificates: URLs to SslCertificate resources that are used to - // authenticate connections to Backends. Currently exactly one SSL - // certificate must be specified. - SslCertificates []string `json:"sslCertificates,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetSslProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxy - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetSslProxyList: Contains a list of TargetSslProxy resources. -type TargetSslProxyList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetSslProxy resources. - Items []*TargetSslProxy `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
- ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetSslProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxyList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetTcpProxiesSetBackendServiceRequest struct { - // Service: The URL of the new BackendService resource for the - // targetTcpProxy. - Service string `json:"service,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Service") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Service") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetTcpProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxiesSetBackendServiceRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetTcpProxiesSetProxyHeaderRequest struct { - // ProxyHeader: The new type of proxy header to append before sending - // data to the backend. NONE or PROXY_V1 are allowed. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ProxyHeader") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ProxyHeader") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxiesSetProxyHeaderRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetTcpProxy: A TargetTcpProxy resource. 
This resource defines a -// TCP proxy. -type TargetTcpProxy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always - // compute#targetTcpProxy for target TCP proxies. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Service: URL to the BackendService resource. - Service string `json:"service,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetTcpProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxy - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetTcpProxyList: Contains a list of TargetTcpProxy resources. -type TargetTcpProxyList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetTcpProxy resources. - Items []*TargetTcpProxy `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. 
If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxyList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetVpnGateway: Represents a Target VPN gateway resource. -type TargetVpnGateway struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // ForwardingRules: [Output Only] A list of URLs to the ForwardingRule - // resources. ForwardingRules are created using - // compute.forwardingRules.insert and associated to a VPN gateway. - ForwardingRules []string `json:"forwardingRules,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Network: URL of the network to which this VPN gateway is attached. - // Provided by the client when the VPN gateway is created. - Network string `json:"network,omitempty"` - - // Region: [Output Only] URL of the region where the target VPN gateway - // resides. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. 
- SelfLink string `json:"selfLink,omitempty"` - - // Status: [Output Only] The status of the VPN gateway. - // - // Possible values: - // "CREATING" - // "DELETING" - // "FAILED" - // "READY" - Status string `json:"status,omitempty"` - - // Tunnels: [Output Only] A list of URLs to VpnTunnel resources. - // VpnTunnels are created using compute.vpntunnels.insert method and - // associated to a VPN gateway. - Tunnels []string `json:"tunnels,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGateway - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetVpnGatewayAggregatedList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: A map of scoped target vpn gateway lists. - Items map[string]TargetVpnGatewaysScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewayAggregatedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetVpnGatewayList: Contains a list of TargetVpnGateway resources. -type TargetVpnGatewayList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of TargetVpnGateway resources. - Items []*TargetVpnGateway `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetVpnGatewayList) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewayList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetVpnGatewaysScopedList struct { - // TargetVpnGateways: [Output Only] List of target vpn gateways - // contained in this scope. - TargetVpnGateways []*TargetVpnGateway `json:"targetVpnGateways,omitempty"` - - // Warning: [Output Only] Informational warning which replaces the list - // of addresses when the list is empty. - Warning *TargetVpnGatewaysScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "TargetVpnGateways") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "TargetVpnGateways") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetVpnGatewaysScopedList) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewaysScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetVpnGatewaysScopedListWarning: [Output Only] Informational -// warning which replaces the list of addresses when the list is empty. -type TargetVpnGatewaysScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetVpnGatewaysScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetVpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewaysScopedListWarning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetVpnGatewaysScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. 
For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewaysScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TestFailure struct { - ActualService string `json:"actualService,omitempty"` - - ExpectedService string `json:"expectedService,omitempty"` - - Host string `json:"host,omitempty"` - - Path string `json:"path,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ActualService") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ActualService") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TestFailure) MarshalJSON() ([]byte, error) { - type noMethod TestFailure - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TestPermissionsRequest struct { - // Permissions: The set of permissions to check for the 'resource'. - // Permissions with wildcards (such as '*' or 'storage.*') are not - // allowed. - Permissions []string `json:"permissions,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Permissions") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Permissions") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TestPermissionsRequest) MarshalJSON() ([]byte, error) { - type noMethod TestPermissionsRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TestPermissionsResponse struct { - // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is allowed. - Permissions []string `json:"permissions,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Permissions") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Permissions") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TestPermissionsResponse) MarshalJSON() ([]byte, error) { - type noMethod TestPermissionsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type UDPHealthCheck struct { - // Port: The UDP port number for the health check request. Valid values - // are 1 through 65535. - Port int64 `json:"port,omitempty"` - - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. - PortName string `json:"portName,omitempty"` - - // Request: Raw data of request to send in payload of UDP packet. It is - // an error if this is empty. The request data can only be ASCII. - Request string `json:"request,omitempty"` - - // Response: The bytes to match against the beginning of the response - // data. It is an error if this is empty. The response data can only be - // ASCII. - Response string `json:"response,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Port") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
- ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Port") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *UDPHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod UDPHealthCheck - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// UrlMap: A UrlMap resource. This resource defines the mapping from URL -// to the BackendService resource, based on the "longest-match" of the -// URL's host and path. -type UrlMap struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // DefaultService: The URL of the BackendService resource if none of the - // hostRules match. - DefaultService string `json:"defaultService,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a UrlMap. An up-to-date - // fingerprint must be provided in order to update the UrlMap. - Fingerprint string `json:"fingerprint,omitempty"` - - // HostRules: The list of HostRules to use against the URL. - HostRules []*HostRule `json:"hostRules,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#urlMaps for - // url maps. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // PathMatchers: The list of named PathMatchers to use against the URL. - PathMatchers []*PathMatcher `json:"pathMatchers,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Tests: The list of expected URL mappings. Request to update this - // UrlMap will succeed only if all of the test cases pass. - Tests []*UrlMapTest `json:"tests,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *UrlMap) MarshalJSON() ([]byte, error) { - type noMethod UrlMap - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// UrlMapList: Contains a list of UrlMap resources. -type UrlMapList struct { - // Id: [Output Only] Unique identifier for the resource. Set by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of UrlMap resources. - Items []*UrlMap `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *UrlMapList) MarshalJSON() ([]byte, error) { - type noMethod UrlMapList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type UrlMapReference struct { - UrlMap string `json:"urlMap,omitempty"` - - // ForceSendFields is a list of field names (e.g. "UrlMap") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "UrlMap") to include in API - // requests with the JSON null value. 
By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *UrlMapReference) MarshalJSON() ([]byte, error) { - type noMethod UrlMapReference - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// UrlMapTest: Message for the expected URL mappings. -type UrlMapTest struct { - // Description: Description of this test case. - Description string `json:"description,omitempty"` - - // Host: Host portion of the URL. - Host string `json:"host,omitempty"` - - // Path: Path portion of the URL. - Path string `json:"path,omitempty"` - - // Service: Expected BackendService resource the given URL should be - // mapped to. - Service string `json:"service,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *UrlMapTest) MarshalJSON() ([]byte, error) { - type noMethod UrlMapTest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// UrlMapValidationResult: Message representing the validation result -// for a UrlMap. -type UrlMapValidationResult struct { - LoadErrors []string `json:"loadErrors,omitempty"` - - // LoadSucceeded: Whether the given UrlMap can be successfully loaded. - // If false, 'loadErrors' indicates the reasons. - LoadSucceeded bool `json:"loadSucceeded,omitempty"` - - TestFailures []*TestFailure `json:"testFailures,omitempty"` - - // TestPassed: If successfully loaded, this field indicates whether the - // test passed. If false, 'testFailures's indicate the reason of - // failure. - TestPassed bool `json:"testPassed,omitempty"` - - // ForceSendFields is a list of field names (e.g. "LoadErrors") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "LoadErrors") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. 
- // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *UrlMapValidationResult) MarshalJSON() ([]byte, error) { - type noMethod UrlMapValidationResult - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type UrlMapsValidateRequest struct { - // Resource: Content of the UrlMap to be validated. - Resource *UrlMap `json:"resource,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Resource") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Resource") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *UrlMapsValidateRequest) MarshalJSON() ([]byte, error) { - type noMethod UrlMapsValidateRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type UrlMapsValidateResponse struct { - Result *UrlMapValidationResult `json:"result,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Result") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Result") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { - type noMethod UrlMapsValidateResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// UsageExportLocation: The location in Cloud Storage and naming method -// of the daily usage report. Contains bucket_name and report_name -// prefix. -type UsageExportLocation struct { - // BucketName: The name of an existing bucket in Cloud Storage where the - // usage report object is stored. The Google Service Account is granted - // write access to this bucket. This can either be the bucket name by - // itself, such as example-bucket, or the bucket name with gs:// or - // https://storage.googleapis.com/ in front of it, such as - // gs://example-bucket. 
- BucketName string `json:"bucketName,omitempty"` - - // ReportNamePrefix: An optional prefix for the name of the usage report - // object stored in bucketName. If not supplied, defaults to usage. The - // report is stored as a CSV file named - // report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the - // usage according to Pacific Time. If you supply a prefix, it should - // conform to Cloud Storage object naming conventions. - ReportNamePrefix string `json:"reportNamePrefix,omitempty"` - - // ForceSendFields is a list of field names (e.g. "BucketName") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "BucketName") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *UsageExportLocation) MarshalJSON() ([]byte, error) { - type noMethod UsageExportLocation - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type VpnTunnel struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // DetailedStatus: [Output Only] Detailed status message for the VPN - // tunnel. - DetailedStatus string `json:"detailedStatus,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // IkeVersion: IKE protocol version to use when establishing the VPN - // tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. - // Default version is 2. - IkeVersion int64 `json:"ikeVersion,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for - // VPN tunnels. - Kind string `json:"kind,omitempty"` - - // LocalTrafficSelector: Local traffic selector to use when establishing - // the VPN tunnel with peer VPN gateway. The value should be a CIDR - // formatted string, for example: 192.168.0.0/16. The ranges should be - // disjoint. Only IPv4 is supported. - LocalTrafficSelector []string `json:"localTrafficSelector,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // PeerIp: IP address of the peer VPN gateway. Only IPv4 is supported. 
- PeerIp string `json:"peerIp,omitempty"` - - // Region: [Output Only] URL of the region where the VPN tunnel resides. - Region string `json:"region,omitempty"` - - // RemoteTrafficSelector: Remote traffic selectors to use when - // establishing the VPN tunnel with peer VPN gateway. The value should - // be a CIDR formatted string, for example: 192.168.0.0/16. The ranges - // should be disjoint. Only IPv4 is supported. - RemoteTrafficSelector []string `json:"remoteTrafficSelector,omitempty"` - - // Router: URL of router resource to be used for dynamic routing. - Router string `json:"router,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // SharedSecret: Shared secret used to set the secure session between - // the Cloud VPN gateway and the peer VPN gateway. - SharedSecret string `json:"sharedSecret,omitempty"` - - // SharedSecretHash: Hash of the shared secret. - SharedSecretHash string `json:"sharedSecretHash,omitempty"` - - // Status: [Output Only] The status of the VPN tunnel. - // - // Possible values: - // "ALLOCATING_RESOURCES" - // "AUTHORIZATION_ERROR" - // "DEPROVISIONING" - // "ESTABLISHED" - // "FAILED" - // "FIRST_HANDSHAKE" - // "NEGOTIATION_FAILURE" - // "NETWORK_ERROR" - // "NO_INCOMING_PACKETS" - // "PROVISIONING" - // "REJECTED" - // "WAITING_FOR_FULL_CONFIG" - Status string `json:"status,omitempty"` - - // TargetVpnGateway: URL of the VPN gateway with which this VPN tunnel - // is associated. Provided by the client when the VPN tunnel is created. - TargetVpnGateway string `json:"targetVpnGateway,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *VpnTunnel) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnel - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type VpnTunnelAggregatedList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A map of scoped vpn tunnel lists. - Items map[string]VpnTunnelsScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for - // VPN tunnels. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. 
If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelAggregatedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// VpnTunnelList: Contains a list of VpnTunnel resources. -type VpnTunnelList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of VpnTunnel resources. - Items []*VpnTunnel `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for - // VPN tunnels. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. 
It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *VpnTunnelList) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type VpnTunnelsScopedList struct { - // VpnTunnels: List of vpn tunnels contained in this scope. - VpnTunnels []*VpnTunnel `json:"vpnTunnels,omitempty"` - - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. - Warning *VpnTunnelsScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "VpnTunnels") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "VpnTunnels") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *VpnTunnelsScopedList) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelsScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// VpnTunnelsScopedListWarning: Informational warning which replaces the -// list of addresses when the list is empty. -type VpnTunnelsScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*VpnTunnelsScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
- ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *VpnTunnelsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelsScopedListWarning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type VpnTunnelsScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelsScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type XpnHostList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of XPN host project URLs. - Items []*Project `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#xpnHostList for - // lists of XPN hosts. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. 
- SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *XpnHostList) MarshalJSON() ([]byte, error) { - type noMethod XpnHostList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// XpnResourceId: XpnResourceId -type XpnResourceId struct { - // Id: The ID of the XPN resource. In the case of projects, this field - // matches the project's name, not the canonical ID. - Id string `json:"id,omitempty"` - - // Type: The type of the XPN resource. - // - // Possible values: - // "PROJECT" - // "XPN_RESOURCE_TYPE_UNSPECIFIED" - Type string `json:"type,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *XpnResourceId) MarshalJSON() ([]byte, error) { - type noMethod XpnResourceId - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Zone: A Zone resource. -type Zone struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Deprecated: [Output Only] The deprecation status associated with this - // zone. - Deprecated *DeprecationStatus `json:"deprecated,omitempty"` - - // Description: [Output Only] Textual description of the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#zone for - // zones. - Kind string `json:"kind,omitempty"` - - // Name: [Output Only] Name of the resource. 
- Name string `json:"name,omitempty"` - - // Region: [Output Only] Full URL reference to the region which hosts - // the zone. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Status: [Output Only] Status of the zone, either UP or DOWN. - // - // Possible values: - // "DOWN" - // "UP" - Status string `json:"status,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Zone) MarshalJSON() ([]byte, error) { - type noMethod Zone - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ZoneList: Contains a list of zone resources. -type ZoneList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of Zone resources. - Items []*Zone `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. 
- // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ZoneList) MarshalJSON() ([]byte, error) { - type noMethod ZoneList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ZoneSetLabelsRequest struct { - // LabelFingerprint: The fingerprint of the previous set of labels for - // this resource, used to detect conflicts. The fingerprint is initially - // generated by Compute Engine and changes after every request to modify - // or update labels. You must always provide an up-to-date fingerprint - // hash in order to update or change labels. Make a get() request to the - // resource to get the latest fingerprint. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: The labels to set for this resource. - Labels map[string]string `json:"labels,omitempty"` - - // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "LabelFingerprint") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ZoneSetLabelsRequest) MarshalJSON() ([]byte, error) { - type noMethod ZoneSetLabelsRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// method id "compute.acceleratorTypes.aggregatedList": - -type AcceleratorTypesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of accelerator types. -func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTypesAggregatedListCall { - c := &AcceleratorTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. 
For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *AcceleratorTypesAggregatedListCall) Filter(filter string) *AcceleratorTypesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AcceleratorTypesAggregatedListCall) MaxResults(maxResults int64) *AcceleratorTypesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AcceleratorTypesAggregatedListCall) OrderBy(orderBy string) *AcceleratorTypesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AcceleratorTypesAggregatedListCall) PageToken(pageToken string) *AcceleratorTypesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AcceleratorTypesAggregatedListCall) Fields(s ...googleapi.Field) *AcceleratorTypesAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AcceleratorTypesAggregatedListCall) IfNoneMatch(entityTag string) *AcceleratorTypesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *AcceleratorTypesAggregatedListCall) Context(ctx context.Context) *AcceleratorTypesAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *AcceleratorTypesAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/acceleratorTypes") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.acceleratorTypes.aggregatedList" call. -// Exactly one of *AcceleratorTypeAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *AcceleratorTypeAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &AcceleratorTypeAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an aggregated list of accelerator types.", - // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/acceleratorTypes", - // "response": { - // "$ref": "AcceleratorTypeAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *AcceleratorTypesAggregatedListCall) Pages(ctx context.Context, f func(*AcceleratorTypeAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.acceleratorTypes.get": - -type AcceleratorTypesGetCall struct { - s *Service - project string - zone string - acceleratorType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified accelerator type. Get a list of available -// accelerator types by making a list() request. 
-func (r *AcceleratorTypesService) Get(project string, zone string, acceleratorType string) *AcceleratorTypesGetCall { - c := &AcceleratorTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.acceleratorType = acceleratorType - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AcceleratorTypesGetCall) Fields(s ...googleapi.Field) *AcceleratorTypesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AcceleratorTypesGetCall) IfNoneMatch(entityTag string) *AcceleratorTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AcceleratorTypesGetCall) Context(ctx context.Context) *AcceleratorTypesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *AcceleratorTypesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "acceleratorType": c.acceleratorType, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.acceleratorTypes.get" call. -// Exactly one of *AcceleratorType or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AcceleratorType.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*AcceleratorType, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &AcceleratorType{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified accelerator type. Get a list of available accelerator types by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.get", - // "parameterOrder": [ - // "project", - // "zone", - // "acceleratorType" - // ], - // "parameters": { - // "acceleratorType": { - // "description": "Name of the accelerator type to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}", - // "response": { - // "$ref": "AcceleratorType" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.acceleratorTypes.list": - -type AcceleratorTypesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of accelerator types available to the -// specified project. -func (r *AcceleratorTypesService) List(project string, zone string) *AcceleratorTypesListCall { - c := &AcceleratorTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. 
For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *AcceleratorTypesListCall) Filter(filter string) *AcceleratorTypesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AcceleratorTypesListCall) MaxResults(maxResults int64) *AcceleratorTypesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AcceleratorTypesListCall) OrderBy(orderBy string) *AcceleratorTypesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AcceleratorTypesListCall) PageToken(pageToken string) *AcceleratorTypesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AcceleratorTypesListCall) Fields(s ...googleapi.Field) *AcceleratorTypesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AcceleratorTypesListCall) IfNoneMatch(entityTag string) *AcceleratorTypesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AcceleratorTypesListCall) Context(ctx context.Context) *AcceleratorTypesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *AcceleratorTypesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.acceleratorTypes.list" call. -// Exactly one of *AcceleratorTypeList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *AcceleratorTypeList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &AcceleratorTypeList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of accelerator types available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.list", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/acceleratorTypes", - // "response": { - // "$ref": "AcceleratorTypeList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *AcceleratorTypesListCall) Pages(ctx context.Context, f func(*AcceleratorTypeList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.addresses.aggregatedList": - -type AddressesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of addresses. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/aggregatedList -func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedListCall { - c := &AddressesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AddressesAggregatedListCall) OrderBy(orderBy string) *AddressesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AddressesAggregatedListCall) IfNoneMatch(entityTag string) *AddressesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AddressesAggregatedListCall) Context(ctx context.Context) *AddressesAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *AddressesAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/addresses") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.addresses.aggregatedList" call. -// Exactly one of *AddressAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *AddressAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AddressAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &AddressAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an aggregated list of addresses.", - // "httpMethod": "GET", - // "id": "compute.addresses.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/addresses", - // "response": { - // "$ref": "AddressAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *AddressesAggregatedListCall) Pages(ctx context.Context, f func(*AddressAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.addresses.delete": - -type AddressesDeleteCall struct { - s *Service - project string - region string - address string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified address resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/delete -func (r *AddressesService) Delete(project string, region string, address string) *AddressesDeleteCall { - c := &AddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.address = address - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AddressesDeleteCall) Context(ctx context.Context) *AddressesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *AddressesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "address": c.address, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.addresses.delete" call. 
-// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified address resource.", - // "httpMethod": "DELETE", - // "id": "compute.addresses.delete", - // "parameterOrder": [ - // "project", - // "region", - // "address" - // ], - // "parameters": { - // "address": { - // "description": "Name of the address resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/addresses/{address}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.addresses.get": - -type AddressesGetCall struct { - s *Service - project string - region string - address string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified address resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/get -func (r *AddressesService) Get(project string, region string, address string) *AddressesGetCall { - c := &AddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.address = address - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AddressesGetCall) IfNoneMatch(entityTag string) *AddressesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AddressesGetCall) Context(ctx context.Context) *AddressesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *AddressesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "address": c.address, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.addresses.get" call. -// Exactly one of *Address or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Address.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Address{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified address resource.", - // "httpMethod": "GET", - // "id": "compute.addresses.get", - // "parameterOrder": [ - // "project", - // "region", - // "address" - // ], - // "parameters": { - // "address": { - // "description": "Name of the address resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/addresses/{address}", - // "response": { - // "$ref": "Address" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.addresses.insert": - -type AddressesInsertCall struct { - s *Service - project string - region string - address *Address - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates an address resource in the specified project using -// the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/insert -func (r *AddressesService) Insert(project string, region string, address *Address) *AddressesInsertCall { - c := &AddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.address = address - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AddressesInsertCall) Context(ctx context.Context) *AddressesInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *AddressesInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.addresses.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates an address resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.addresses.insert", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/addresses", - // "request": { - // "$ref": "Address" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.addresses.list": - -type AddressesListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of addresses contained within the specified -// region. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/list -func (r *AddressesService) List(project string, region string) *AddressesListCall { - c := &AddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *AddressesListCall) Filter(filter string) *AddressesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AddressesListCall) OrderBy(orderBy string) *AddressesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AddressesListCall) IfNoneMatch(entityTag string) *AddressesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AddressesListCall) Context(ctx context.Context) *AddressesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *AddressesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.addresses.list" call. -// Exactly one of *AddressList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AddressList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &AddressList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of addresses contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.addresses.list", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/addresses", - // "response": { - // "$ref": "AddressList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.addresses.testIamPermissions": - -type AddressesTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *AddressesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *AddressesTestIamPermissionsCall { - c := &AddressesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AddressesTestIamPermissionsCall) Fields(s ...googleapi.Field) *AddressesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AddressesTestIamPermissionsCall) Context(ctx context.Context) *AddressesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *AddressesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AddressesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.addresses.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.addresses.testIamPermissions", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/addresses/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // 
"https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.autoscalers.aggregatedList": - -type AutoscalersAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of autoscalers. -func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregatedListCall { - c := &AutoscalersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *AutoscalersAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. 
-func (c *AutoscalersAggregatedListCall) OrderBy(orderBy string) *AutoscalersAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *AutoscalersAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *AutoscalersAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *AutoscalersAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AutoscalersAggregatedListCall) Context(ctx context.Context) *AutoscalersAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *AutoscalersAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/autoscalers") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.autoscalers.aggregatedList" call. -// Exactly one of *AutoscalerAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *AutoscalerAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*AutoscalerAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &AutoscalerAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an aggregated list of autoscalers.", - // "httpMethod": "GET", - // "id": "compute.autoscalers.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/autoscalers", - // "response": { - // "$ref": "AutoscalerAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *AutoscalersAggregatedListCall) Pages(ctx context.Context, f func(*AutoscalerAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.autoscalers.delete": - -type AutoscalersDeleteCall struct { - s *Service - project string - zone string - autoscaler string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified autoscaler. -func (r *AutoscalersService) Delete(project string, zone string, autoscaler string) *AutoscalersDeleteCall { - c := &AutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.autoscaler = autoscaler - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AutoscalersDeleteCall) Context(ctx context.Context) *AutoscalersDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *AutoscalersDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "autoscaler": c.autoscaler, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.autoscalers.delete" call. -// Exactly one of *Operation or error will be non-nil. 
Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified autoscaler.", - // "httpMethod": "DELETE", - // "id": "compute.autoscalers.delete", - // "parameterOrder": [ - // "project", - // "zone", - // "autoscaler" - // ], - // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.autoscalers.get": - -type AutoscalersGetCall struct { - s *Service - project string - zone string - autoscaler string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified autoscaler resource. Get a list of -// available autoscalers by making a list() request. -func (r *AutoscalersService) Get(project string, zone string, autoscaler string) *AutoscalersGetCall { - c := &AutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.autoscaler = autoscaler - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AutoscalersGetCall) Context(ctx context.Context) *AutoscalersGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *AutoscalersGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "autoscaler": c.autoscaler, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.autoscalers.get" call. -// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Autoscaler.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Autoscaler{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified autoscaler resource. 
Get a list of available autoscalers by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.autoscalers.get", - // "parameterOrder": [ - // "project", - // "zone", - // "autoscaler" - // ], - // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", - // "response": { - // "$ref": "Autoscaler" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.autoscalers.insert": - -type AutoscalersInsertCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates an autoscaler in the specified project using the data -// included in the request. -func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Autoscaler) *AutoscalersInsertCall { - c := &AutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.autoscaler = autoscaler - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AutoscalersInsertCall) Context(ctx context.Context) *AutoscalersInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *AutoscalersInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.autoscalers.insert" call. 
-// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates an autoscaler in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.autoscalers.insert", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/autoscalers", - // "request": { - // "$ref": "Autoscaler" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.autoscalers.list": - -type AutoscalersListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of autoscalers contained within the specified -// zone. -func (r *AutoscalersService) List(project string, zone string) *AutoscalersListCall { - c := &AutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. 
-// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AutoscalersListCall) OrderBy(orderBy string) *AutoscalersListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *AutoscalersListCall) Context(ctx context.Context) *AutoscalersListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *AutoscalersListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.autoscalers.list" call. -// Exactly one of *AutoscalerList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AutoscalerList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &AutoscalerList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of autoscalers contained within the specified zone.", - // "httpMethod": "GET", - // "id": "compute.autoscalers.list", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/autoscalers", - // "response": { - // "$ref": "AutoscalerList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *AutoscalersListCall) Pages(ctx context.Context, f func(*AutoscalerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.autoscalers.patch": - -type AutoscalersPatchCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates an autoscaler in the specified project using the data -// included in the request. This method supports patch semantics. 
-func (r *AutoscalersService) Patch(project string, zone string, autoscaler *Autoscaler) *AutoscalersPatchCall { - c := &AutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to patch. -func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCall { - c.urlParams_.Set("autoscaler", autoscaler) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AutoscalersPatchCall) Context(ctx context.Context) *AutoscalersPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *AutoscalersPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.autoscalers.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "compute.autoscalers.patch", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to patch.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/autoscalers", - // "request": { - // "$ref": "Autoscaler" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.autoscalers.testIamPermissions": - -type AutoscalersTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *AutoscalersService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *AutoscalersTestIamPermissionsCall { - c := &AutoscalersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *AutoscalersTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AutoscalersTestIamPermissionsCall) Context(ctx context.Context) *AutoscalersTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *AutoscalersTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.autoscalers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.autoscalers.testIamPermissions", - // "parameterOrder": [ - // "project", - // "zone", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.autoscalers.update": - -type AutoscalersUpdateCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates an autoscaler in the specified project using the data -// included in the request. 
-func (r *AutoscalersService) Update(project string, zone string, autoscaler *Autoscaler) *AutoscalersUpdateCall { - c := &AutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to update. -func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdateCall { - c.urlParams_.Set("autoscaler", autoscaler) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AutoscalersUpdateCall) Context(ctx context.Context) *AutoscalersUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *AutoscalersUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.autoscalers.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an autoscaler in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.autoscalers.update", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to update.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/autoscalers", - // "request": { - // "$ref": "Autoscaler" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.backendBuckets.delete": - -type BackendBucketsDeleteCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified BackendBucket resource. -func (r *BackendBucketsService) Delete(project string, backendBucket string) *BackendBucketsDeleteCall { - c := &BackendBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendBucket = backendBucket - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendBucketsDeleteCall) Context(ctx context.Context) *BackendBucketsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *BackendBucketsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendBuckets.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified BackendBucket resource.", - // "httpMethod": "DELETE", - // "id": "compute.backendBuckets.delete", - // "parameterOrder": [ - // "project", - // "backendBucket" - // ], - // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.backendBuckets.get": - -type BackendBucketsGetCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified BackendBucket resource. Get a list of -// available backend buckets by making a list() request. 
-func (r *BackendBucketsService) Get(project string, backendBucket string) *BackendBucketsGetCall { - c := &BackendBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendBucket = backendBucket - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendBucketsGetCall) Context(ctx context.Context) *BackendBucketsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendBucketsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendBuckets.get" call. -// Exactly one of *BackendBucket or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *BackendBucket.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BackendBucket{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified BackendBucket resource. Get a list of available backend buckets by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.backendBuckets.get", - // "parameterOrder": [ - // "project", - // "backendBucket" - // ], - // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", - // "response": { - // "$ref": "BackendBucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.backendBuckets.insert": - -type BackendBucketsInsertCall struct { - s *Service - project string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a BackendBucket resource in the specified project -// using the data included in the request. -func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBucket) *BackendBucketsInsertCall { - c := &BackendBucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendbucket = backendbucket - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendBucketsInsertCall) Context(ctx context.Context) *BackendBucketsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *BackendBucketsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendBuckets.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.backendBuckets.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/backendBuckets", - // "request": { - // "$ref": "BackendBucket" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.backendBuckets.list": - -type BackendBucketsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of BackendBucket resources available to the -// specified project. 
-func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { - c := &BackendBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendBucketsListCall) Context(ctx context.Context) *BackendBucketsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendBucketsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendBuckets.list" call. -// Exactly one of *BackendBucketList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BackendBucketList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucketList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BackendBucketList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of BackendBucket resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.backendBuckets.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/backendBuckets", - // "response": { - // "$ref": "BackendBucketList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucketList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.backendBuckets.patch": - -type BackendBucketsPatchCall struct { - s *Service - project string - backendBucket string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates the specified BackendBucket resource with the data -// included in the request. This method supports patch semantics. -func (r *BackendBucketsService) Patch(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsPatchCall { - c := &BackendBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendBucket = backendBucket - c.backendbucket = backendbucket - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendBucketsPatchCall) Context(ctx context.Context) *BackendBucketsPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendBucketsPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendBuckets.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "compute.backendBuckets.patch", - // "parameterOrder": [ - // "project", - // "backendBucket" - // ], - // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", - // "request": { - // "$ref": "BackendBucket" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.backendBuckets.update": - -type BackendBucketsUpdateCall struct { - s *Service - project string - backendBucket string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates the specified BackendBucket resource with the data -// included in the request. -func (r *BackendBucketsService) Update(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsUpdateCall { - c := &BackendBucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendBucket = backendBucket - c.backendbucket = backendbucket - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendBucketsUpdateCall) Fields(s ...googleapi.Field) *BackendBucketsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendBucketsUpdateCall) Context(ctx context.Context) *BackendBucketsUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *BackendBucketsUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendBuckets.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates the specified BackendBucket resource with the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.backendBuckets.update", - // "parameterOrder": [ - // "project", - // "backendBucket" - // ], - // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", - // "request": { - // "$ref": "BackendBucket" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.backendServices.aggregatedList": - -type BackendServicesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves the 
list of all BackendService resources, -// regional and global, available to the specified project. -func (r *BackendServicesService) AggregatedList(project string) *BackendServicesAggregatedListCall { - c := &BackendServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *BackendServicesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServicesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
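Taken together, the filter, maxResults, orderBy and pageToken parameters documented above support manual paging. A sketch, reusing the svc client from the earlier example; the project name and the filter expression (an RE2 match against the whole field) are placeholders:

func listAggregatedBackendServices(svc *compute.Service, project string) error {
	call := svc.BackendServices.AggregatedList(project).
		Filter("name eq frontend-.*").
		MaxResults(100).
		OrderBy("creationTimestamp desc")
	pageToken := ""
	for {
		list, err := call.PageToken(pageToken).Do()
		if err != nil {
			return err
		}
		// list.Items (keyed by scope) holds this page's results.
		if list.NextPageToken == "" {
			return nil
		}
		pageToken = list.NextPageToken
	}
}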
-func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *BackendServicesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *BackendServicesAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *BackendServicesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendServicesAggregatedListCall) Context(ctx context.Context) *BackendServicesAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendServicesAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/backendServices") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.aggregatedList" call. -// Exactly one of *BackendServiceAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *BackendServiceAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*BackendServiceAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BackendServiceAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.backendServices.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Name of the project scoping this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/backendServices", - // "response": { - // "$ref": "BackendServiceAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*BackendServiceAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.backendServices.delete": - -type BackendServicesDeleteCall struct { - s *Service - project string - backendService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified BackendService resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/delete -func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall { - c := &BackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendService = backendService - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendServicesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendServicesDeleteCall) Context(ctx context.Context) *BackendServicesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendServicesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified BackendService resource.", - // "httpMethod": "DELETE", - // "id": "compute.backendServices.delete", - // "parameterOrder": [ - // "project", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/backendServices/{backendService}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.backendServices.get": - -type BackendServicesGetCall struct { - s *Service - project string - backendService string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified BackendService resource. Get a list of -// available backend services by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/get -func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall { - c := &BackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendService = backendService - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
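The Get call above can be made conditional with IfNoneMatch. In this sketch the etag argument is assumed to have been captured from an earlier response (for example from its ETag header), and googleapi refers to google.golang.org/api/googleapi:

func getBackendServiceIfChanged(svc *compute.Service, project, name, etag string) (*compute.BackendService, error) {
	bs, err := svc.BackendServices.Get(project, name).IfNoneMatch(etag).Do()
	if googleapi.IsNotModified(err) {
		// The resource has not changed since the ETag was captured; keep the cached copy.
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return bs, nil
}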
-func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendServicesGetCall) Context(ctx context.Context) *BackendServicesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendServicesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.get" call. -// Exactly one of *BackendService or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *BackendService.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BackendService{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified BackendService resource. 
Get a list of available backend services by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.backendServices.get", - // "parameterOrder": [ - // "project", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/backendServices/{backendService}", - // "response": { - // "$ref": "BackendService" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.backendServices.getHealth": - -type BackendServicesGetHealthCall struct { - s *Service - project string - backendService string - resourcegroupreference *ResourceGroupReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// GetHealth: Gets the most recent health check results for this -// BackendService. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/getHealth -func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { - c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendService = backendService - c.resourcegroupreference = resourcegroupreference - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServicesGetHealthCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendServicesGetHealthCall) Context(ctx context.Context) *BackendServicesGetHealthCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendServicesGetHealthCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.getHealth" call. -// Exactly one of *BackendServiceGroupHealth or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *BackendServiceGroupHealth.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BackendServiceGroupHealth{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the most recent health check results for this BackendService.", - // "httpMethod": "POST", - // "id": "compute.backendServices.getHealth", - // "parameterOrder": [ - // "project", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the queried instance belongs.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/backendServices/{backendService}/getHealth", - // "request": { - // "$ref": "ResourceGroupReference" - // }, - // "response": { - // "$ref": "BackendServiceGroupHealth" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.backendServices.insert": - -type BackendServicesInsertCall struct { - s *Service - project string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a BackendService resource in the specified project -// using the data included in the request. There are several -// restrictions and guidelines to keep in mind when creating a backend -// service. Read Restrictions and Guidelines for more information. 
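A sketch of the Insert call described above. The body here is deliberately minimal (placeholder name, HTTP protocol); a real request would normally also carry health checks and backends, so treat this as shape only. The returned Operation is asynchronous and would still need to be polled (for example via GlobalOperations.Get), which is omitted here:

func createBackendService(ctx context.Context, svc *compute.Service, project string) (*compute.Operation, error) {
	bs := &compute.BackendService{
		Name:     "example-backend-service", // placeholder
		Protocol: "HTTP",
		// HealthChecks and Backends omitted for brevity.
	}
	return svc.BackendServices.Insert(project, bs).Context(ctx).Do()
}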
-// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/insert -func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { - c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendservice = backendservice - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendServicesInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendServicesInsertCall) Context(ctx context.Context) *BackendServicesInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendServicesInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. 
Read Restrictions and Guidelines for more information.", - // "httpMethod": "POST", - // "id": "compute.backendServices.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/backendServices", - // "request": { - // "$ref": "BackendService" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.backendServices.list": - -type BackendServicesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of BackendService resources available to the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/list -func (r *BackendServicesService) List(project string) *BackendServicesListCall { - c := &BackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
-// (Default: 500) -func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServicesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendServicesListCall) Context(ctx context.Context) *BackendServicesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendServicesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.list" call. -// Exactly one of *BackendServiceList or error will be non-nil. Any -// non-2xx status code is an error. 
Response headers are in either -// *BackendServiceList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BackendServiceList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of BackendService resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.backendServices.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/backendServices", - // "response": { - // "$ref": "BackendServiceList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.backendServices.patch": - -type BackendServicesPatchCall struct { - s *Service - project string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Patches the specified BackendService resource with the data -// included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. This method -// supports patch semantics. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/patch -func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { - c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendService = backendService - c.backendservice = backendservice - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServicesPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
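The Pages helper shown above wraps the pageToken loop for the list call. A sketch that walks every BackendService in a project (fmt and context come from the standard library, svc as before):

func printBackendServiceNames(ctx context.Context, svc *compute.Service, project string) error {
	return svc.BackendServices.List(project).Pages(ctx, func(page *compute.BackendServiceList) error {
		for _, bs := range page.Items {
			fmt.Println(bs.Name)
		}
		// Returning a non-nil error here stops the iteration early.
		return nil
	})
}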
-func (c *BackendServicesPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. 
This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "compute.backendServices.patch", - // "parameterOrder": [ - // "project", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/backendServices/{backendService}", - // "request": { - // "$ref": "BackendService" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.backendServices.testIamPermissions": - -type BackendServicesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *BackendServicesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *BackendServicesTestIamPermissionsCall { - c := &BackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *BackendServicesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendServicesTestIamPermissionsCall) Context(ctx context.Context) *BackendServicesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendServicesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{resource}/testIamPermissions") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.backendServices.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/backendServices/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.backendServices.update": - -type BackendServicesUpdateCall struct { - s *Service - project string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates the specified BackendService resource with the data -// included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. 
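To contrast Update with the Patch call above: Update replaces the whole resource, so the usual pattern is read, modify, write. A sketch; the field chosen and the names are placeholders:

func setBackendServiceTimeout(ctx context.Context, svc *compute.Service, project, name string, seconds int64) (*compute.Operation, error) {
	bs, err := svc.BackendServices.Get(project, name).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	bs.TimeoutSec = seconds
	// Update sends the complete, modified resource; Patch would accept only the changed fields.
	return svc.BackendServices.Update(project, name, bs).Context(ctx).Do()
}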
-// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/update -func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { - c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendService = backendService - c.backendservice = backendservice - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendServicesUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendServicesUpdateCall) Context(ctx context.Context) *BackendServicesUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendServicesUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. 
Read Restrictions and Guidelines for more information.", - // "httpMethod": "PUT", - // "id": "compute.backendServices.update", - // "parameterOrder": [ - // "project", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/backendServices/{backendService}", - // "request": { - // "$ref": "BackendService" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.diskTypes.aggregatedList": - -type DiskTypesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of disk types. -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/aggregatedList -func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall { - c := &DiskTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *DiskTypesAggregatedListCall) OrderBy(orderBy string) *DiskTypesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DiskTypesAggregatedListCall) Context(ctx context.Context) *DiskTypesAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DiskTypesAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.diskTypes.aggregatedList" call. -// Exactly one of *DiskTypeAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *DiskTypeAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTypeAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &DiskTypeAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an aggregated list of disk types.", - // "httpMethod": "GET", - // "id": "compute.diskTypes.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/diskTypes", - // "response": { - // "$ref": "DiskTypeAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *DiskTypesAggregatedListCall) Pages(ctx context.Context, f func(*DiskTypeAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.diskTypes.get": - -type DiskTypesGetCall struct { - s *Service - project string - zone string - diskType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified disk type. Get a list of available disk -// types by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/get -func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall { - c := &DiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.diskType = diskType - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *DiskTypesGetCall) IfNoneMatch(entityTag string) *DiskTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DiskTypesGetCall) Context(ctx context.Context) *DiskTypesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DiskTypesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "diskType": c.diskType, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.diskTypes.get" call. -// Exactly one of *DiskType or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskType.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &DiskType{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified disk type. 
Get a list of available disk types by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.diskTypes.get", - // "parameterOrder": [ - // "project", - // "zone", - // "diskType" - // ], - // "parameters": { - // "diskType": { - // "description": "Name of the disk type to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/diskTypes/{diskType}", - // "response": { - // "$ref": "DiskType" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.diskTypes.list": - -type DiskTypesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of disk types available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/list -func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall { - c := &DiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. 
-func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *DiskTypesListCall) OrderBy(orderBy string) *DiskTypesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DiskTypesListCall) Context(ctx context.Context) *DiskTypesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DiskTypesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.diskTypes.list" call. -// Exactly one of *DiskTypeList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *DiskTypeList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &DiskTypeList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of disk types available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.diskTypes.list", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/diskTypes", - // "response": { - // "$ref": "DiskTypeList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.disks.aggregatedList": - -type DisksAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of persistent disks. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/aggregatedList -func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { - c := &DisksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. 
The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *DisksAggregatedListCall) OrderBy(orderBy string) *DisksAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DisksAggregatedListCall) Context(ctx context.Context) *DisksAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DisksAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.disks.aggregatedList" call. -// Exactly one of *DiskAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *DiskAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &DiskAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an aggregated list of persistent disks.", - // "httpMethod": "GET", - // "id": "compute.disks.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/disks", - // "response": { - // "$ref": "DiskAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.disks.createSnapshot": - -type DisksCreateSnapshotCall struct { - s *Service - project string - zone string - disk string - snapshot *Snapshot - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// CreateSnapshot: Creates a snapshot of a specified persistent disk. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/createSnapshot -func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { - c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.disk = disk - c.snapshot = snapshot - return c -} - -// GuestFlush sets the optional parameter "guestFlush": -func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { - c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnapshotCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DisksCreateSnapshotCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.disks.createSnapshot" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a snapshot of a specified persistent disk.", - // "httpMethod": "POST", - // "id": "compute.disks.createSnapshot", - // "parameterOrder": [ - // "project", - // "zone", - // "disk" - // ], - // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to snapshot.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "guestFlush": { - // "location": "query", - // "type": "boolean" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", - // "request": { - // "$ref": "Snapshot" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.disks.delete": - -type DisksDeleteCall struct { - s *Service - project string - zone string - disk string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified persistent disk. Deleting a disk -// removes its data permanently and is irreversible. However, deleting a -// disk does not delete any snapshots previously made from the disk. You -// must separately delete snapshots. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/delete -func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall { - c := &DisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.disk = disk - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
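createSnapshot only starts an Operation; the snapshot itself becomes usable later. A sketch, with a hypothetical snapshot name derived from the disk name:

package snippets

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// snapshotDisk starts a snapshot of a zonal persistent disk and returns the
// Operation that tracks it; the snapshot becomes READY asynchronously.
func snapshotDisk(ctx context.Context, svc *compute.Service, project, zone, disk string) (*compute.Operation, error) {
	snap := &compute.Snapshot{Name: disk + "-snap"} // hypothetical snapshot name
	return svc.Disks.CreateSnapshot(project, zone, disk, snap).
		GuestFlush(true). // ask the guest OS to flush buffers first, where supported
		Context(ctx).
		Do()
}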
-func (c *DisksDeleteCall) Context(ctx context.Context) *DisksDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DisksDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.disks.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. 
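Deleting a disk is likewise asynchronous, and, as the description notes, snapshots taken from it survive and must be removed separately (for example through the Snapshots service). A minimal sketch:

package snippets

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// deleteDisk removes a persistent disk. Snapshots made from the disk are not
// deleted with it; they have to be removed separately.
func deleteDisk(ctx context.Context, svc *compute.Service, project, zone, disk string) (*compute.Operation, error) {
	return svc.Disks.Delete(project, zone, disk).Context(ctx).Do()
}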
You must separately delete snapshots.", - // "httpMethod": "DELETE", - // "id": "compute.disks.delete", - // "parameterOrder": [ - // "project", - // "zone", - // "disk" - // ], - // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to delete.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/disks/{disk}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.disks.get": - -type DisksGetCall struct { - s *Service - project string - zone string - disk string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns a specified persistent disk. Get a list of available -// persistent disks by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/get -func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall { - c := &DisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.disk = disk - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DisksGetCall) Context(ctx context.Context) *DisksGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DisksGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.disks.get" call. -// Exactly one of *Disk or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Disk.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Disk{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns a specified persistent disk. Get a list of available persistent disks by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.disks.get", - // "parameterOrder": [ - // "project", - // "zone", - // "disk" - // ], - // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/disks/{disk}", - // "response": { - // "$ref": "Disk" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.disks.insert": - -type DisksInsertCall struct { - s *Service - project string - zone string - disk *Disk - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a persistent disk in the specified project using the -// data in the request. You can create a disk with a sourceImage, a -// sourceSnapshot, or create an empty 500 GB data disk by omitting all -// properties. You can also create a disk that is larger than the -// default size by specifying the sizeGb property. 
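The insert call can build a disk from a snapshot and override the default size with SizeGb. A sketch using hypothetical name, size, and source-snapshot values:

package snippets

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// restoreDiskFromSnapshot creates a new persistent disk from an existing
// snapshot, overriding the default size via SizeGb.
func restoreDiskFromSnapshot(ctx context.Context, svc *compute.Service, project, zone, name, snapshotLink string) (*compute.Operation, error) {
	disk := &compute.Disk{
		Name:           name,
		SizeGb:         200,          // hypothetical size, in GB
		SourceSnapshot: snapshotLink, // e.g. "global/snapshots/<name>" or a full URL
	}
	return svc.Disks.Insert(project, zone, disk).Context(ctx).Do()
}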
-// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/insert -func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { - c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.disk = disk - return c -} - -// SourceImage sets the optional parameter "sourceImage": Source image -// to restore onto a disk. -func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall { - c.urlParams_.Set("sourceImage", sourceImage) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DisksInsertCall) Context(ctx context.Context) *DisksInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DisksInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.disks.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. 
You can also create a disk that is larger than the default size by specifying the sizeGb property.", - // "httpMethod": "POST", - // "id": "compute.disks.insert", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "sourceImage": { - // "description": "Optional. Source image to restore onto a disk.", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/disks", - // "request": { - // "$ref": "Disk" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.disks.list": - -type DisksListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of persistent disks contained within the -// specified zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/list -func (r *DisksService) List(project string, zone string) *DisksListCall { - c := &DisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *DisksListCall) Filter(filter string) *DisksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *DisksListCall) OrderBy(orderBy string) *DisksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DisksListCall) Context(ctx context.Context) *DisksListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DisksListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.disks.list" call. -// Exactly one of *DiskList or error will be non-nil. Any non-2xx status -// code is an error. 
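A hedged sketch (not part of the patch) of the conditional-request pattern described above, using IfNoneMatch together with googleapi.IsNotModified. It assumes the package and imports from the earlier sketch plus "google.golang.org/api/googleapi"; the helper name and caching policy are illustrative only.

// listDisksIfChanged re-fetches the disk list only when the server-side ETag
// differs from the one captured on a previous call; a 304 surfaces as an
// error that googleapi.IsNotModified recognizes.
func listDisksIfChanged(svc *compute.Service, project, zone, etag string) (*compute.DiskList, error) {
	list, err := svc.Disks.List(project, zone).IfNoneMatch(etag).Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // unchanged; the caller can keep its cached copy
	}
	return list, err
}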
Response headers are in either -// *DiskList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &DiskList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of persistent disks contained within the specified zone.", - // "httpMethod": "GET", - // "id": "compute.disks.list", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/disks", - // "response": { - // "$ref": "DiskList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.disks.resize": - -type DisksResizeCall struct { - s *Service - project string - zone string - disk string - disksresizerequest *DisksResizeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Resize: Resizes the specified persistent disk. -func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { - c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.disk = disk - c.disksresizerequest = disksresizerequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DisksResizeCall) Context(ctx context.Context) *DisksResizeCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *DisksResizeCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksresizerequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/resize") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.disks.resize" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Resizes the specified persistent disk.", - // "httpMethod": "POST", - // "id": "compute.disks.resize", - // "parameterOrder": [ - // "project", - // "zone", - // "disk" - // ], - // "parameters": { - // "disk": { - // "description": "The name of the persistent disk.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/disks/{disk}/resize", - // "request": { - // "$ref": "DisksResizeRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.disks.setLabels": - -type DisksSetLabelsCall struct { - s *Service - project string - zone string - resource string - zonesetlabelsrequest 
*ZoneSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetLabels: Sets the labels on a disk. To learn more about labels, -// read the Labeling or Tagging Resources documentation. -func (r *DisksService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *DisksSetLabelsCall { - c := &DisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.resource = resource - c.zonesetlabelsrequest = zonesetlabelsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DisksSetLabelsCall) Fields(s ...googleapi.Field) *DisksSetLabelsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DisksSetLabelsCall) Context(ctx context.Context) *DisksSetLabelsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DisksSetLabelsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetlabelsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setLabels") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.disks.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the labels on a disk. 
To learn more about labels, read the Labeling or Tagging Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.disks.setLabels", - // "parameterOrder": [ - // "project", - // "zone", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/disks/{resource}/setLabels", - // "request": { - // "$ref": "ZoneSetLabelsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.disks.testIamPermissions": - -type DisksTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { - c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DisksTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/testIamPermissions") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.disks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.disks.testIamPermissions", - // "parameterOrder": [ - // "project", - // "zone", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/disks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.firewalls.delete": - -type FirewallsDeleteCall struct { - s *Service - project string - firewall string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified firewall. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/delete -func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall { - c := &FirewallsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.firewall = firewall - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *FirewallsDeleteCall) Context(ctx context.Context) *FirewallsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *FirewallsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.firewalls.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified firewall.", - // "httpMethod": "DELETE", - // "id": "compute.firewalls.delete", - // "parameterOrder": [ - // "project", - // "firewall" - // ], - // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/firewalls/{firewall}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.firewalls.get": - -type FirewallsGetCall struct { - s *Service - project string - firewall string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified firewall. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/get -func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall { - c := &FirewallsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.firewall = firewall - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *FirewallsGetCall) Context(ctx context.Context) *FirewallsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *FirewallsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.firewalls.get" call. -// Exactly one of *Firewall or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Firewall.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Firewall{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified firewall.", - // "httpMethod": "GET", - // "id": "compute.firewalls.get", - // "parameterOrder": [ - // "project", - // "firewall" - // ], - // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/firewalls/{firewall}", - // "response": { - // "$ref": "Firewall" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.firewalls.insert": - -type FirewallsInsertCall struct { - s *Service - project string - firewall *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a firewall rule in the specified project using the -// data included in the request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/insert -func (r *FirewallsService) Insert(project string, firewall *Firewall) *FirewallsInsertCall { - c := &FirewallsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.firewall = firewall - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *FirewallsInsertCall) Context(ctx context.Context) *FirewallsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *FirewallsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.firewalls.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a firewall rule in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.firewalls.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/firewalls", - // "request": { - // "$ref": "Firewall" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.firewalls.list": - -type FirewallsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of firewall rules available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/list -func (r *FirewallsService) List(project string) *FirewallsListCall { - c := &FirewallsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. 
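A sketch (not part of the patch) combining the filter syntax described above with the Pages helper defined further down, so callers visit every matching firewall rule without tracking pageToken by hand. The filter string, MaxResults value, and function name are illustrative only, and the code assumes the package and imports from the first sketch plus "context".

// visitClusterFirewalls lists firewall rules whose names match an RE2
// expression and invokes the inline callback once per page of results.
func visitClusterFirewalls(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.Firewalls.List(project).
		Filter("name eq k8s-fw-.*"). // literal is matched against the entire field
		MaxResults(250)              // optional; at most 500 results per page
	return call.Pages(ctx, func(page *compute.FirewallList) error {
		for _, fw := range page.Items {
			_ = fw.Name // process each rule; returning an error halts iteration
		}
		return nil
	})
}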
-func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *FirewallsListCall) OrderBy(orderBy string) *FirewallsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *FirewallsListCall) IfNoneMatch(entityTag string) *FirewallsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *FirewallsListCall) Context(ctx context.Context) *FirewallsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *FirewallsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.firewalls.list" call. -// Exactly one of *FirewallList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *FirewallList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &FirewallList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of firewall rules available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.firewalls.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/firewalls", - // "response": { - // "$ref": "FirewallList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *FirewallsListCall) Pages(ctx context.Context, f func(*FirewallList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.firewalls.patch": - -type FirewallsPatchCall struct { - s *Service - project string - firewall string - firewall2 *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates the specified firewall rule with the data included in -// the request. This method supports patch semantics. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/patch -func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall { - c := &FirewallsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.firewall = firewall - c.firewall2 = firewall2 - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *FirewallsPatchCall) Context(ctx context.Context) *FirewallsPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *FirewallsPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.firewalls.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates the specified firewall rule with the data included in the request. 
This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "compute.firewalls.patch", - // "parameterOrder": [ - // "project", - // "firewall" - // ], - // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/firewalls/{firewall}", - // "request": { - // "$ref": "Firewall" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.firewalls.testIamPermissions": - -type FirewallsTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *FirewallsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *FirewallsTestIamPermissionsCall { - c := &FirewallsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *FirewallsTestIamPermissionsCall) Fields(s ...googleapi.Field) *FirewallsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *FirewallsTestIamPermissionsCall) Context(ctx context.Context) *FirewallsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *FirewallsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *FirewallsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{resource}/testIamPermissions") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.firewalls.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *FirewallsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.firewalls.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/firewalls/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.firewalls.update": - -type FirewallsUpdateCall struct { - s *Service - project string - firewall string - firewall2 *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates the specified firewall rule with the data included in -// the request. Using PUT method, can only update following fields of -// firewall rule: allowed, description, sourceRanges, sourceTags, -// targetTags. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/update -func (r *FirewallsService) Update(project string, firewall string, firewall2 *Firewall) *FirewallsUpdateCall { - c := &FirewallsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.firewall = firewall - c.firewall2 = firewall2 - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *FirewallsUpdateCall) Context(ctx context.Context) *FirewallsUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *FirewallsUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.firewalls.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates the specified firewall rule with the data included in the request. 
Using PUT method, can only update following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags.", - // "httpMethod": "PUT", - // "id": "compute.firewalls.update", - // "parameterOrder": [ - // "project", - // "firewall" - // ], - // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/firewalls/{firewall}", - // "request": { - // "$ref": "Firewall" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.forwardingRules.aggregatedList": - -type ForwardingRulesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of forwarding rules. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/aggregatedList -func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRulesAggregatedListCall { - c := &ForwardingRulesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *ForwardingRulesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *ForwardingRulesAggregatedListCall) OrderBy(orderBy string) *ForwardingRulesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *ForwardingRulesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *ForwardingRulesAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ForwardingRulesAggregatedListCall) IfNoneMatch(entityTag string) *ForwardingRulesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ForwardingRulesAggregatedListCall) Context(ctx context.Context) *ForwardingRulesAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ForwardingRulesAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/forwardingRules") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.forwardingRules.aggregatedList" call. -// Exactly one of *ForwardingRuleAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ForwardingRuleAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ForwardingRuleAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an aggregated list of forwarding rules.", - // "httpMethod": "GET", - // "id": "compute.forwardingRules.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/forwardingRules", - // "response": { - // "$ref": "ForwardingRuleAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ForwardingRulesAggregatedListCall) Pages(ctx context.Context, f func(*ForwardingRuleAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.forwardingRules.delete": - -type ForwardingRulesDeleteCall struct { - s *Service - project string - region string - forwardingRule string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified ForwardingRule resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/delete -func (r *ForwardingRulesService) Delete(project string, region string, forwardingRule string) *ForwardingRulesDeleteCall { - c := &ForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.forwardingRule = forwardingRule - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRulesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *ForwardingRulesDeleteCall) Context(ctx context.Context) *ForwardingRulesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ForwardingRulesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.forwardingRules.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified ForwardingRule resource.", - // "httpMethod": "DELETE", - // "id": "compute.forwardingRules.delete", - // "parameterOrder": [ - // "project", - // "region", - // "forwardingRule" - // ], - // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id 
"compute.forwardingRules.get": - -type ForwardingRulesGetCall struct { - s *Service - project string - region string - forwardingRule string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified ForwardingRule resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/get -func (r *ForwardingRulesService) Get(project string, region string, forwardingRule string) *ForwardingRulesGetCall { - c := &ForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.forwardingRule = forwardingRule - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ForwardingRulesGetCall) Context(ctx context.Context) *ForwardingRulesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ForwardingRulesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.forwardingRules.get" call. -// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ForwardingRule.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ForwardingRule{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified ForwardingRule resource.", - // "httpMethod": "GET", - // "id": "compute.forwardingRules.get", - // "parameterOrder": [ - // "project", - // "region", - // "forwardingRule" - // ], - // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", - // "response": { - // "$ref": "ForwardingRule" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.forwardingRules.insert": - -type ForwardingRulesInsertCall struct { - s *Service - project string - region string - forwardingrule *ForwardingRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a ForwardingRule resource in the specified project -// and region using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/insert -func (r *ForwardingRulesService) Insert(project string, region string, forwardingrule *ForwardingRule) *ForwardingRulesInsertCall { - c := &ForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.forwardingrule = forwardingrule - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRulesInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ForwardingRulesInsertCall) Context(ctx context.Context) *ForwardingRulesInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ForwardingRulesInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.forwardingRules.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.forwardingRules.insert", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/forwardingRules", - // "request": { - // "$ref": "ForwardingRule" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.forwardingRules.list": - -type ForwardingRulesListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of ForwardingRule 
resources available to the -// specified project and region. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/list -func (r *ForwardingRulesService) List(project string, region string) *ForwardingRulesListCall { - c := &ForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *ForwardingRulesListCall) OrderBy(orderBy string) *ForwardingRulesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRulesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ForwardingRulesListCall) Context(ctx context.Context) *ForwardingRulesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ForwardingRulesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.forwardingRules.list" call. -// Exactly one of *ForwardingRuleList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ForwardingRuleList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ForwardingRuleList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", - // "httpMethod": "GET", - // "id": "compute.forwardingRules.list", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/forwardingRules", - // "response": { - // "$ref": "ForwardingRuleList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.forwardingRules.setTarget": - -type ForwardingRulesSetTargetCall struct { - s *Service - project string - region string - forwardingRule string - targetreference *TargetReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetTarget: Changes target URL for forwarding rule. The new target -// should be of the same type as the old target. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/setTarget -func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { - c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.forwardingRule = forwardingRule - c.targetreference = targetreference - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingRulesSetTargetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ForwardingRulesSetTargetCall) Context(ctx context.Context) *ForwardingRulesSetTargetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ForwardingRulesSetTargetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.forwardingRules.setTarget" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Changes target URL for forwarding rule. 
The new target should be of the same type as the old target.", - // "httpMethod": "POST", - // "id": "compute.forwardingRules.setTarget", - // "parameterOrder": [ - // "project", - // "region", - // "forwardingRule" - // ], - // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource in which target is to be set.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", - // "request": { - // "$ref": "TargetReference" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.forwardingRules.testIamPermissions": - -type ForwardingRulesTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *ForwardingRulesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *ForwardingRulesTestIamPermissionsCall { - c := &ForwardingRulesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ForwardingRulesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ForwardingRulesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ForwardingRulesTestIamPermissionsCall) Context(ctx context.Context) *ForwardingRulesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ForwardingRulesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.forwardingRules.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.forwardingRules.testIamPermissions", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/forwardingRules/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // 
"scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.globalAddresses.delete": - -type GlobalAddressesDeleteCall struct { - s *Service - project string - address string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified address resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/delete -func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall { - c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.address = address - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddressesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalAddressesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "address": c.address, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalAddresses.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified address resource.", - // "httpMethod": "DELETE", - // "id": "compute.globalAddresses.delete", - // "parameterOrder": [ - // "project", - // "address" - // ], - // "parameters": { - // "address": { - // "description": "Name of the address resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/addresses/{address}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.globalAddresses.get": - -type GlobalAddressesGetCall struct { - s *Service - project string - address string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified address resource. Get a list of available -// addresses by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/get -func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall { - c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.address = address - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *GlobalAddressesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "address": c.address, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalAddresses.get" call. -// Exactly one of *Address or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Address.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Address{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified address resource. Get a list of available addresses by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.globalAddresses.get", - // "parameterOrder": [ - // "project", - // "address" - // ], - // "parameters": { - // "address": { - // "description": "Name of the address resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/addresses/{address}", - // "response": { - // "$ref": "Address" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.globalAddresses.insert": - -type GlobalAddressesInsertCall struct { - s *Service - project string - address *Address - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates an address resource in the specified project using -// the data included in the request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/insert -func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall { - c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.address = address - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddressesInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalAddressesInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalAddresses.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates an address resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.globalAddresses.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/addresses", - // "request": { - // "$ref": "Address" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.globalAddresses.list": - -type GlobalAddressesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of global addresses. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/list -func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { - c := &GlobalAddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. 
-func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *GlobalAddressesListCall) OrderBy(orderBy string) *GlobalAddressesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalAddressesListCall) IfNoneMatch(entityTag string) *GlobalAddressesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalAddressesListCall) Context(ctx context.Context) *GlobalAddressesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalAddressesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalAddresses.list" call. -// Exactly one of *AddressList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AddressList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &AddressList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of global addresses.", - // "httpMethod": "GET", - // "id": "compute.globalAddresses.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/addresses", - // "response": { - // "$ref": "AddressList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.globalAddresses.testIamPermissions": - -type GlobalAddressesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *GlobalAddressesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *GlobalAddressesTestIamPermissionsCall { - c := &GlobalAddressesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalAddressesTestIamPermissionsCall) Fields(s ...googleapi.Field) *GlobalAddressesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *GlobalAddressesTestIamPermissionsCall) Context(ctx context.Context) *GlobalAddressesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalAddressesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalAddressesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalAddresses.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalAddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.globalAddresses.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/addresses/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.globalForwardingRules.delete": - -type GlobalForwardingRulesDeleteCall struct { - s *Service - project string - forwardingRule string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified GlobalForwardingRule resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/delete -func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall { - c := &GlobalForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.forwardingRule = forwardingRule - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalForwardingRulesDeleteCall) Context(ctx context.Context) *GlobalForwardingRulesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalForwardingRules.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified GlobalForwardingRule resource.", - // "httpMethod": "DELETE", - // "id": "compute.globalForwardingRules.delete", - // "parameterOrder": [ - // "project", - // "forwardingRule" - // ], - // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/forwardingRules/{forwardingRule}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.globalForwardingRules.get": - -type GlobalForwardingRulesGetCall struct { - s *Service - project string - forwardingRule string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified GlobalForwardingRule resource. Get a list -// of available forwarding rules by making a list() request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/get -func (r *GlobalForwardingRulesService) Get(project string, forwardingRule string) *GlobalForwardingRulesGetCall { - c := &GlobalForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.forwardingRule = forwardingRule - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalForwardingRulesGetCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalForwardingRulesGetCall) Context(ctx context.Context) *GlobalForwardingRulesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalForwardingRulesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalForwardingRules.get" call. -// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ForwardingRule.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ForwardingRule{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified GlobalForwardingRule resource. Get a list of available forwarding rules by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.globalForwardingRules.get", - // "parameterOrder": [ - // "project", - // "forwardingRule" - // ], - // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/forwardingRules/{forwardingRule}", - // "response": { - // "$ref": "ForwardingRule" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.globalForwardingRules.insert": - -type GlobalForwardingRulesInsertCall struct { - s *Service - project string - forwardingrule *ForwardingRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a GlobalForwardingRule resource in the specified -// project using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/insert -func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *ForwardingRule) *GlobalForwardingRulesInsertCall { - c := &GlobalForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.forwardingrule = forwardingrule - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalForwardingRulesInsertCall) Context(ctx context.Context) *GlobalForwardingRulesInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *GlobalForwardingRulesInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalForwardingRules.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/forwardingRules", - // "request": { - // "$ref": "ForwardingRule" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.globalForwardingRules.list": - -type GlobalForwardingRulesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of GlobalForwardingRule resources available to -// the specified project. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/list -func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRulesListCall { - c := &GlobalForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForwardingRulesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *GlobalForwardingRulesListCall) OrderBy(orderBy string) *GlobalForwardingRulesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwardingRulesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalForwardingRulesListCall) Context(ctx context.Context) *GlobalForwardingRulesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalForwardingRulesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalForwardingRules.list" call. -// Exactly one of *ForwardingRuleList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ForwardingRuleList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ForwardingRuleList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.globalForwardingRules.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/forwardingRules", - // "response": { - // "$ref": "ForwardingRuleList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *GlobalForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.globalForwardingRules.setTarget": - -type GlobalForwardingRulesSetTargetCall struct { - s *Service - project string - forwardingRule string - targetreference *TargetReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetTarget: Changes target URL for the GlobalForwardingRule resource. -// The new target should be of the same type as the old target. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/setTarget -func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall { - c := &GlobalForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.forwardingRule = forwardingRule - c.targetreference = targetreference - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetTargetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalForwardingRulesSetTargetCall) Context(ctx context.Context) *GlobalForwardingRulesSetTargetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *GlobalForwardingRulesSetTargetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}/setTarget") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalForwardingRules.setTarget" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Changes target URL for the GlobalForwardingRule resource. 
The new target should be of the same type as the old target.", - // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.setTarget", - // "parameterOrder": [ - // "project", - // "forwardingRule" - // ], - // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource in which target is to be set.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget", - // "request": { - // "$ref": "TargetReference" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.globalForwardingRules.testIamPermissions": - -type GlobalForwardingRulesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *GlobalForwardingRulesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *GlobalForwardingRulesTestIamPermissionsCall { - c := &GlobalForwardingRulesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalForwardingRulesTestIamPermissionsCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalForwardingRulesTestIamPermissionsCall) Context(ctx context.Context) *GlobalForwardingRulesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalForwardingRulesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{resource}/testIamPermissions") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalForwardingRules.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/forwardingRules/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.globalOperations.aggregatedList": - -type GlobalOperationsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of all operations. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/aggregatedList -func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall { - c := &GlobalOperationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. 
Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *GlobalOperationsAggregatedListCall) OrderBy(orderBy string) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOperationsAggregatedListCall) IfNoneMatch(entityTag string) *GlobalOperationsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalOperationsAggregatedListCall) Context(ctx context.Context) *GlobalOperationsAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalOperationsAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalOperations.aggregatedList" call. -// Exactly one of *OperationAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *OperationAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*OperationAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &OperationAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an aggregated list of all operations.", - // "httpMethod": "GET", - // "id": "compute.globalOperations.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/operations", - // "response": { - // "$ref": "OperationAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(*OperationAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.globalOperations.delete": - -type GlobalOperationsDeleteCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified Operations resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/delete -func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { - c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.operation = operation - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperationsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalOperationsDeleteCall) Context(ctx context.Context) *GlobalOperationsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalOperationsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "operation": c.operation, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalOperations.delete" call. -func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Deletes the specified Operations resource.", - // "httpMethod": "DELETE", - // "id": "compute.globalOperations.delete", - // "parameterOrder": [ - // "project", - // "operation" - // ], - // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/operations/{operation}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.globalOperations.get": - -type GlobalOperationsGetCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Retrieves the specified Operations resource. Get a list of -// operations by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/get -func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall { - c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.operation = operation - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOperationsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalOperationsGetCall) Context(ctx context.Context) *GlobalOperationsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *GlobalOperationsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "operation": c.operation, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalOperations.get" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the specified Operations resource. Get a list of operations by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.globalOperations.get", - // "parameterOrder": [ - // "project", - // "operation" - // ], - // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/operations/{operation}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.globalOperations.list": - -type GlobalOperationsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of Operation resources contained within the -// specified project. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/list -func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall { - c := &GlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperationsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *GlobalOperationsListCall) OrderBy(orderBy string) *GlobalOperationsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperationsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperationsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalOperationsListCall) Context(ctx context.Context) *GlobalOperationsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalOperationsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalOperations.list" call. -// Exactly one of *OperationList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *OperationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &OperationList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of Operation resources contained within the specified project.", - // "httpMethod": "GET", - // "id": "compute.globalOperations.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/operations", - // "response": { - // "$ref": "OperationList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.healthChecks.delete": - -type HealthChecksDeleteCall struct { - s *Service - project string - healthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified HealthCheck resource. -func (r *HealthChecksService) Delete(project string, healthCheck string) *HealthChecksDeleteCall { - c := &HealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.healthCheck = healthCheck - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HealthChecksDeleteCall) Context(ctx context.Context) *HealthChecksDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HealthChecksDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.healthChecks.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified HealthCheck resource.", - // "httpMethod": "DELETE", - // "id": "compute.healthChecks.delete", - // "parameterOrder": [ - // "project", - // "healthCheck" - // ], - // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/healthChecks/{healthCheck}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.healthChecks.get": - -type HealthChecksGetCall struct { - s *Service - project string - healthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified HealthCheck resource. Get a list of -// available health checks by making a list() request. -func (r *HealthChecksService) Get(project string, healthCheck string) *HealthChecksGetCall { - c := &HealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.healthCheck = healthCheck - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. 
Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HealthChecksGetCall) Context(ctx context.Context) *HealthChecksGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HealthChecksGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.healthChecks.get" call. -// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HealthCheck.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &HealthCheck{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified HealthCheck resource. 
Get a list of available health checks by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.healthChecks.get", - // "parameterOrder": [ - // "project", - // "healthCheck" - // ], - // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/healthChecks/{healthCheck}", - // "response": { - // "$ref": "HealthCheck" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.healthChecks.insert": - -type HealthChecksInsertCall struct { - s *Service - project string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a HealthCheck resource in the specified project using -// the data included in the request. -func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) *HealthChecksInsertCall { - c := &HealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.healthcheck = healthcheck - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HealthChecksInsertCall) Context(ctx context.Context) *HealthChecksInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HealthChecksInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.healthChecks.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.healthChecks.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/healthChecks", - // "request": { - // "$ref": "HealthCheck" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.healthChecks.list": - -type HealthChecksListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of HealthCheck resources available to the -// specified project. -func (r *HealthChecksService) List(project string) *HealthChecksListCall { - c := &HealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. 
For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HealthChecksListCall) Context(ctx context.Context) *HealthChecksListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *HealthChecksListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.healthChecks.list" call. -// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HealthCheckList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &HealthCheckList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of HealthCheck resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.healthChecks.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/healthChecks", - // "response": { - // "$ref": "HealthCheckList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.healthChecks.patch": - -type HealthChecksPatchCall struct { - s *Service - project string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates a HealthCheck resource in the specified project using -// the data included in the request. This method supports patch -// semantics. -func (r *HealthChecksService) Patch(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksPatchCall { - c := &HealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.healthCheck = healthCheck - c.healthcheck = healthcheck - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HealthChecksPatchCall) Context(ctx context.Context) *HealthChecksPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HealthChecksPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.healthChecks.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. 
This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "compute.healthChecks.patch", - // "parameterOrder": [ - // "project", - // "healthCheck" - // ], - // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/healthChecks/{healthCheck}", - // "request": { - // "$ref": "HealthCheck" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.healthChecks.testIamPermissions": - -type HealthChecksTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *HealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HealthChecksTestIamPermissionsCall { - c := &HealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HealthChecksTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HealthChecksTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HealthChecksTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{resource}/testIamPermissions") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.healthChecks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.healthChecks.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/healthChecks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.healthChecks.update": - -type HealthChecksUpdateCall struct { - s *Service - project string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates a HealthCheck resource in the specified project using -// the data included in the request. -func (r *HealthChecksService) Update(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksUpdateCall { - c := &HealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.healthCheck = healthCheck - c.healthcheck = healthcheck - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *HealthChecksUpdateCall) Fields(s ...googleapi.Field) *HealthChecksUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HealthChecksUpdateCall) Context(ctx context.Context) *HealthChecksUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HealthChecksUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.healthChecks.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.healthChecks.update", - // "parameterOrder": [ - // "project", - // "healthCheck" - // ], - // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/healthChecks/{healthCheck}", - // "request": { - // "$ref": "HealthCheck" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.httpHealthChecks.delete": - -type HttpHealthChecksDeleteCall struct { - s *Service - project string - httpHealthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified HttpHealthCheck resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/delete -func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { - c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httpHealthCheck = httpHealthCheck - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChecksDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *HttpHealthChecksDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpHealthChecks.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified HttpHealthCheck resource.", - // "httpMethod": "DELETE", - // "id": "compute.httpHealthChecks.delete", - // "parameterOrder": [ - // "project", - // "httpHealthCheck" - // ], - // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.httpHealthChecks.get": - -type HttpHealthChecksGetCall struct { - s *Service - project string - httpHealthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified HttpHealthCheck resource. Get a list of -// available HTTP health checks by making a list() request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/get -func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall { - c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httpHealthCheck = httpHealthCheck - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthChecksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecksGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HttpHealthChecksGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpHealthChecks.get" call. -// Exactly one of *HttpHealthCheck or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HttpHealthCheck.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheck, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &HttpHealthCheck{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified HttpHealthCheck resource. Get a list of available HTTP health checks by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.httpHealthChecks.get", - // "parameterOrder": [ - // "project", - // "httpHealthCheck" - // ], - // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", - // "response": { - // "$ref": "HttpHealthCheck" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.httpHealthChecks.insert": - -type HttpHealthChecksInsertCall struct { - s *Service - project string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a HttpHealthCheck resource in the specified project -// using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/insert -func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { - c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httphealthcheck = httphealthcheck - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChecksInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *HttpHealthChecksInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpHealthChecks.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.httpHealthChecks.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/httpHealthChecks", - // "request": { - // "$ref": "HttpHealthCheck" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.httpHealthChecks.list": - -type HttpHealthChecksListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of HttpHealthCheck resources available to -// the specified project. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/list -func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { - c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChecksListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthChecksListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HttpHealthChecksListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpHealthChecks.list" call. -// Exactly one of *HttpHealthCheckList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpHealthCheckList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheckList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &HttpHealthCheckList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.httpHealthChecks.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/httpHealthChecks", - // "response": { - // "$ref": "HttpHealthCheckList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealthCheckList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.httpHealthChecks.patch": - -type HttpHealthChecksPatchCall struct { - s *Service - project string - httpHealthCheck string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates a HttpHealthCheck resource in the specified project -// using the data included in the request. This method supports patch -// semantics. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/patch -func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { - c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httpHealthCheck = httpHealthCheck - c.httphealthcheck = httphealthcheck - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChecksPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *HttpHealthChecksPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpHealthChecks.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. 
This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "compute.httpHealthChecks.patch", - // "parameterOrder": [ - // "project", - // "httpHealthCheck" - // ], - // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", - // "request": { - // "$ref": "HttpHealthCheck" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.httpHealthChecks.testIamPermissions": - -type HttpHealthChecksTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *HttpHealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HttpHealthChecksTestIamPermissionsCall { - c := &HttpHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HttpHealthChecksTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HttpHealthChecksTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HttpHealthChecksTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{resource}/testIamPermissions") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpHealthChecks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.httpHealthChecks.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/httpHealthChecks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.httpHealthChecks.update": - -type HttpHealthChecksUpdateCall struct { - s *Service - project string - httpHealthCheck string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates a HttpHealthCheck resource in the specified project -// using the data included in the request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/update -func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { - c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httpHealthCheck = httpHealthCheck - c.httphealthcheck = httphealthcheck - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChecksUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HttpHealthChecksUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpHealthChecks.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.httpHealthChecks.update", - // "parameterOrder": [ - // "project", - // "httpHealthCheck" - // ], - // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", - // "request": { - // "$ref": "HttpHealthCheck" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.httpsHealthChecks.delete": - -type HttpsHealthChecksDeleteCall struct { - s *Service - project string - httpsHealthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified HttpsHealthCheck resource. -func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { - c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httpsHealthCheck = httpsHealthCheck - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthChecksDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthChecksDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *HttpsHealthChecksDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpsHealthChecks.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified HttpsHealthCheck resource.", - // "httpMethod": "DELETE", - // "id": "compute.httpsHealthChecks.delete", - // "parameterOrder": [ - // "project", - // "httpsHealthCheck" - // ], - // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.httpsHealthChecks.get": - -type HttpsHealthChecksGetCall struct { - s *Service - project string - httpsHealthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified HttpsHealthCheck resource. Get a list of -// available HTTPS health checks by making a list() request. 
-func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { - c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httpsHealthCheck = httpsHealthCheck - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChecksGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChecksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChecksGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HttpsHealthChecksGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpsHealthChecks.get" call. -// Exactly one of *HttpsHealthCheck or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpsHealthCheck.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheck, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &HttpsHealthCheck{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified HttpsHealthCheck resource. Get a list of available HTTPS health checks by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.httpsHealthChecks.get", - // "parameterOrder": [ - // "project", - // "httpsHealthCheck" - // ], - // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", - // "response": { - // "$ref": "HttpsHealthCheck" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.httpsHealthChecks.insert": - -type HttpsHealthChecksInsertCall struct { - s *Service - project string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a HttpsHealthCheck resource in the specified project -// using the data included in the request. -func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { - c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httpshealthcheck = httpshealthcheck - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthChecksInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthChecksInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *HttpsHealthChecksInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpsHealthChecks.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.httpsHealthChecks.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/httpsHealthChecks", - // "request": { - // "$ref": "HttpsHealthCheck" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.httpsHealthChecks.list": - -type HttpsHealthChecksListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of HttpsHealthCheck resources available to -// the specified project. 
-func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { - c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChecksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChecksListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChecksListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthChecksListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChecksListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HttpsHealthChecksListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpsHealthChecks.list" call. -// Exactly one of *HttpsHealthCheckList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpsHealthCheckList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheckList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &HttpsHealthCheckList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.httpsHealthChecks.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/httpsHealthChecks", - // "response": { - // "$ref": "HttpsHealthCheckList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHealthCheckList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.httpsHealthChecks.patch": - -type HttpsHealthChecksPatchCall struct { - s *Service - project string - httpsHealthCheck string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates a HttpsHealthCheck resource in the specified project -// using the data included in the request. This method supports patch -// semantics. -func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { - c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httpsHealthCheck = httpsHealthCheck - c.httpshealthcheck = httpshealthcheck - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthChecksPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthChecksPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *HttpsHealthChecksPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpsHealthChecks.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. 
This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "compute.httpsHealthChecks.patch", - // "parameterOrder": [ - // "project", - // "httpsHealthCheck" - // ], - // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", - // "request": { - // "$ref": "HttpsHealthCheck" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.httpsHealthChecks.testIamPermissions": - -type HttpsHealthChecksTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *HttpsHealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HttpsHealthChecksTestIamPermissionsCall { - c := &HttpsHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpsHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HttpsHealthChecksTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpsHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HttpsHealthChecksTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HttpsHealthChecksTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpsHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{resource}/testIamPermissions") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpsHealthChecks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.httpsHealthChecks.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/httpsHealthChecks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.httpsHealthChecks.update": - -type HttpsHealthChecksUpdateCall struct { - s *Service - project string - httpsHealthCheck string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates a HttpsHealthCheck resource in the specified project -// using the data included in the request. -func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { - c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httpsHealthCheck = httpsHealthCheck - c.httpshealthcheck = httpshealthcheck - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthChecksUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthChecksUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HttpsHealthChecksUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpsHealthChecks.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.httpsHealthChecks.update", - // "parameterOrder": [ - // "project", - // "httpsHealthCheck" - // ], - // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", - // "request": { - // "$ref": "HttpsHealthCheck" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.images.delete": - -type ImagesDeleteCall struct { - s *Service - project string - image string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified image. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/delete -func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { - c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.image = image - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ImagesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.images.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified image.", - // "httpMethod": "DELETE", - // "id": "compute.images.delete", - // "parameterOrder": [ - // "project", - // "image" - // ], - // "parameters": { - // "image": { - // "description": "Name of the image resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/images/{image}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.images.deprecate": - -type ImagesDeprecateCall struct { - s *Service - project string - image string - deprecationstatus *DeprecationStatus - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Deprecate: Sets the deprecation status of an image. -// -// If an empty request body is given, clears the deprecation status -// instead. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/deprecate -func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { - c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.image = image - c.deprecationstatus = deprecationstatus - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. 
Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ImagesDeprecateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.images.deprecate" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", - // "httpMethod": "POST", - // "id": "compute.images.deprecate", - // "parameterOrder": [ - // "project", - // "image" - // ], - // "parameters": { - // "image": { - // "description": "Image name.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/images/{image}/deprecate", - // "request": { - // "$ref": "DeprecationStatus" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.images.get": - -type ImagesGetCall struct { - s *Service - project string - image string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified image. Get a list of available images by -// making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/get -func (r *ImagesService) Get(project string, image string) *ImagesGetCall { - c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.image = image - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ImagesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.images.get" call. -// Exactly one of *Image or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Image.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Image{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified image. Get a list of available images by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.images.get", - // "parameterOrder": [ - // "project", - // "image" - // ], - // "parameters": { - // "image": { - // "description": "Name of the image resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/images/{image}", - // "response": { - // "$ref": "Image" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.images.getFromFamily": - -type ImagesGetFromFamilyCall struct { - s *Service - project string - family string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetFromFamily: Returns the latest image that is part of an image -// family and is not deprecated. 
-func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { - c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.family = family - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFamilyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFamilyCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ImagesGetFromFamilyCall) Context(ctx context.Context) *ImagesGetFromFamilyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ImagesGetFromFamilyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "family": c.family, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.images.getFromFamily" call. -// Exactly one of *Image or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Image.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Image{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the latest image that is part of an image family and is not deprecated.", - // "httpMethod": "GET", - // "id": "compute.images.getFromFamily", - // "parameterOrder": [ - // "project", - // "family" - // ], - // "parameters": { - // "family": { - // "description": "Name of the image family to search for.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/images/family/{family}", - // "response": { - // "$ref": "Image" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.images.insert": - -type ImagesInsertCall struct { - s *Service - project string - image *Image - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates an image in the specified project using the data -// included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/insert -func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { - c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.image = image - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ImagesInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.images.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates an image in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.images.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/images", - // "request": { - // "$ref": "Image" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "compute.images.list": - -type ImagesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of private images available to the specified -// project. Private images are images you create that belong to your -// project. 
This method does not get any images that belong to other -// projects, including publicly-available images, like Debian 8. If you -// want to get a list of publicly-available images, use this method to -// make a request to the respective image project, such as debian-cloud -// or windows-cloud. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/list -func (r *ImagesService) List(project string) *ImagesListCall { - c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *ImagesListCall) Filter(filter string) *ImagesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *ImagesListCall) OrderBy(orderBy string) *ImagesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. 
Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ImagesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.images.list" call. -// Exactly one of *ImageList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ImageList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ImageList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. 
This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", - // "httpMethod": "GET", - // "id": "compute.images.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/images", - // "response": { - // "$ref": "ImageList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.images.setLabels": - -type ImagesSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetLabels: Sets the labels on an image. To learn more about labels, -// read the Labeling or Tagging Resources documentation. -func (r *ImagesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ImagesSetLabelsCall { - c := &ImagesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ImagesSetLabelsCall) Fields(s ...googleapi.Field) *ImagesSetLabelsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ImagesSetLabelsCall) Context(ctx context.Context) *ImagesSetLabelsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ImagesSetLabelsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setLabels") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.images.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the labels on an image. To learn more about labels, read the Labeling or Tagging Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.images.setLabels", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/images/{resource}/setLabels", - // "request": { - // "$ref": "GlobalSetLabelsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.images.testIamPermissions": - -type ImagesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *ImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ImagesTestIamPermissionsCall { - c := &ImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ImagesTestIamPermissionsCall) Context(ctx context.Context) *ImagesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ImagesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.images.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.images.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/images/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.instanceGroupManagers.abandonInstances": - -type InstanceGroupManagersAbandonInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// AbandonInstances: Schedules a group action to remove the specified -// instances from the managed instance group. Abandoning an instance -// does not delete the instance, but it does remove the instance from -// any target pools that are applied by the managed instance group. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you abandon. This operation is marked as -// DONE when the action is scheduled even if the instances have not yet -// been removed from the group. You must separately verify the status of -// the abandoning action with the listmanagedinstances method. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { - c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersabandoninstancesrequest = instancegroupmanagersabandoninstancesrequest - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAbandonInstancesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *InstanceGroupManagersAbandonInstancesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.abandonInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. 
This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nYou can specify a maximum of 1000 instances with this method per request.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.abandonInstances", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", - // "request": { - // "$ref": "InstanceGroupManagersAbandonInstancesRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroupManagers.aggregatedList": - -type InstanceGroupManagersAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves the list of managed instance groups and -// groups them by zone. -func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { - c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupManagersAggregatedListCall) OrderBy(orderBy string) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) *InstanceGroupManagersAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.aggregatedList" call. -// Exactly one of *InstanceGroupManagerAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *InstanceGroupManagerAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceGroupManagerAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of managed instance groups and groups them by zone.", - // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/instanceGroupManagers", - // "response": { - // "$ref": "InstanceGroupManagerAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.delete": - -type InstanceGroupManagersDeleteCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified managed instance group and all of the -// instances in that group. Note that the instance group must not belong -// to a backend service. Read Deleting an instance group for more -// information. 
-func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { - c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *InstanceGroupManagersDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupManagersDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. 
Read Deleting an instance group for more information.", - // "httpMethod": "DELETE", - // "id": "compute.instanceGroupManagers.delete", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group to delete.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroupManagers.deleteInstances": - -type InstanceGroupManagersDeleteInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// DeleteInstances: Schedules a group action to delete the specified -// instances in the managed instance group. The instances are also -// removed from any target pools of which they were a member. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you delete. This operation is marked as DONE -// when the action is scheduled even if the instances are still being -// deleted. You must separately verify the status of the deleting action -// with the listmanagedinstances method. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { - c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersdeleteinstancesrequest = instancegroupmanagersdeleteinstancesrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteInstancesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *InstanceGroupManagersDeleteInstancesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.deleteInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. 
You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nYou can specify a maximum of 1000 instances with this method per request.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.deleteInstances", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", - // "request": { - // "$ref": "InstanceGroupManagersDeleteInstancesRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroupManagers.get": - -type InstanceGroupManagersGetCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns all of the details about the specified managed instance -// group. Get a list of available managed instance groups by making a -// list() request. -func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { - c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGroupManagersGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGroupManagersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGroupManagersGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *InstanceGroupManagersGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.get" call. -// Exactly one of *InstanceGroupManager or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupManager.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceGroupManager{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns all of the details about the specified managed instance group. 
Get a list of available managed instance groups by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.get", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", - // "response": { - // "$ref": "InstanceGroupManager" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.instanceGroupManagers.insert": - -type InstanceGroupManagersInsertCall struct { - s *Service - project string - zone string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a managed instance group using the information that -// you specify in the request. After the group is created, it schedules -// an action to create instances in the group using the specified -// instance template. This operation is marked as DONE when the group is -// created even if the instances in the group have not yet been created. -// You must separately verify the status of the individual instances -// with the listmanagedinstances method. -// -// A managed instance group can have up to 1000 VM instances per group. -func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { - c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instancegroupmanager = instancegroupmanager - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *InstanceGroupManagersInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *InstanceGroupManagersInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *InstanceGroupManagersInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. 
You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.insert", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers", - // "request": { - // "$ref": "InstanceGroupManager" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroupManagers.list": - -type InstanceGroupManagersListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of managed instance groups that are contained -// within the specified project and zone. -func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { - c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGroupManagersListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupManagersListCall) OrderBy(orderBy string) *InstanceGroupManagersListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGroupManagersListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGroupManagersListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupManagersListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.list" call. -// Exactly one of *InstanceGroupManagerList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupManagerList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceGroupManagerList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", - // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.list", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers", - // "response": { - // "$ref": "InstanceGroupManagerList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.listManagedInstances": - -type InstanceGroupManagersListManagedInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// ListManagedInstances: Lists all of the instances in the managed -// instance group. Each instance in the list has a currentAction, which -// indicates the action that the managed instance group is performing on -// the instance. For example, if the group is still creating an -// instance, the currentAction is CREATING. If a previous action failed, -// the list displays the errors for that failed action. 
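Since the comment above documents per-instance status via listManagedInstances, here is a hedged sketch of reading currentAction for each instance. It assumes a *compute.Service built as in the earlier sketch; the package and helper names are invented for illustration.

package igmexample

import (
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path of the vendored package
)

func printManagedInstances(svc *compute.Service, project, zone, igm string) error {
	resp, err := svc.InstanceGroupManagers.ListManagedInstances(project, zone, igm).Do()
	if err != nil {
		return err
	}
	for _, mi := range resp.ManagedInstances {
		// CurrentAction is e.g. CREATING, DELETING, or NONE; LastAttempt carries
		// the errors for a failed action, per the description above.
		fmt.Println(mi.Instance, mi.CurrentAction)
	}
	return nil
}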
-func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { - c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - return c -} - -// Filter sets the optional parameter "filter": -func (c *InstanceGroupManagersListManagedInstancesCall) Filter(filter string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": -func (c *InstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "order_by": -func (c *InstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("order_by", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": -func (c *InstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *InstanceGroupManagersListManagedInstancesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.listManagedInstances" call. -// Exactly one of *InstanceGroupManagersListManagedInstancesResponse or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *InstanceGroupManagersListManagedInstancesResponse.ServerResponse.Head -// er or (if a response was returned at all) in -// error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListManagedInstancesResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceGroupManagersListManagedInstancesResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.listManagedInstances", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" - // ], - // "parameters": { - // "filter": { - // "location": "query", - // "type": "string" - // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "order_by": { - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", - // "response": { - // "$ref": "InstanceGroupManagersListManagedInstancesResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
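The List call plus its Pages helper (together with the filter grammar documented above) would be driven roughly as in this hedged sketch; the filter expression and maxResults value are illustrative, and a *compute.Service from the earlier sketch is assumed.

package igmexample

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path of the vendored package
)

func listIGMs(ctx context.Context, svc *compute.Service, project, zone string) error {
	call := svc.InstanceGroupManagers.List(project, zone).
		Filter("name ne example-instance-group"). // field_name comparison_string literal_string
		MaxResults(100)
	// Pages fetches every page and invokes the callback once per page;
	// returning a non-nil error from the callback stops the iteration.
	return call.Pages(ctx, func(page *compute.InstanceGroupManagerList) error {
		for _, igm := range page.Items {
			fmt.Println(igm.Name, igm.TargetSize)
		}
		return nil
	})
}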
-func (c *InstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListManagedInstancesResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.patch": - -type InstanceGroupManagersPatchCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is patched even if the instances in the group are still in the -// process of being patched. You must separately verify the status of -// the individual instances with the listManagedInstances method. This -// method supports patch semantics. -func (r *InstanceGroupManagersService) Patch(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersPatchCall { - c := &InstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceGroupManagersPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersPatchCall) Context(ctx context.Context) *InstanceGroupManagersPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupManagersPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "compute.instanceGroupManagers.patch", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", - // "request": { - // "$ref": "InstanceGroupManager" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.instanceGroupManagers.recreateInstances": - -type InstanceGroupManagersRecreateInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// RecreateInstances: Schedules a group action to recreate the specified -// instances in the managed instance group. The instances are deleted -// and recreated using the current instance template for the managed -// instance group. This operation is marked as DONE when the action is -// scheduled even if the instances have not yet been recreated. You must -// separately verify the status of the recreating action with the -// listmanagedinstances method. 
-// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { - c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.recreateInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nYou can specify a maximum of 1000 instances with this method per request.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.recreateInstances", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", - // "request": { - // "$ref": "InstanceGroupManagersRecreateInstancesRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroupManagers.resize": - -type InstanceGroupManagersResizeCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Resize: Resizes the managed instance group. If you increase the size, -// the group creates new instances using the current instance template. -// If you decrease the size, the group deletes instances. The resize -// operation is marked DONE when the resize actions are scheduled even -// if the group has not yet added or deleted any instances. You must -// separately verify the status of the creating or deleting actions with -// the listmanagedinstances method. 
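A hedged sketch of the recreateInstances and resize flows described above: both return an Operation that is DONE before the per-instance work completes, so listManagedInstances is where progress would be confirmed. The instance URL and sizes are illustrative, and a *compute.Service from the earlier sketch is assumed.

package igmexample

import (
	compute "google.golang.org/api/compute/v0.beta" // assumed import path of the vendored package
)

func recreateAndResize(svc *compute.Service, project, zone, igm string) error {
	// Delete and recreate one instance using the group's current template.
	req := &compute.InstanceGroupManagersRecreateInstancesRequest{
		Instances: []string{"zones/us-central1-f/instances/my-igm-abcd"}, // illustrative instance URL
	}
	if _, err := svc.InstanceGroupManagers.RecreateInstances(project, zone, igm, req).Do(); err != nil {
		return err
	}
	// Resize only schedules the add/delete actions; verify per-instance status
	// with listManagedInstances if needed.
	_, err := svc.InstanceGroupManagers.Resize(project, zone, igm, 5).Do()
	return err
}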
-func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { - c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.urlParams_.Set("size", fmt.Sprint(size)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupManagersResizeCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.resize" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. 
The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.resize", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager", - // "size" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "size": { - // "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", - // "format": "int32", - // "location": "query", - // "required": true, - // "type": "integer" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroupManagers.resizeAdvanced": - -type InstanceGroupManagersResizeAdvancedCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersresizeadvancedrequest *InstanceGroupManagersResizeAdvancedRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// ResizeAdvanced: Resizes the managed instance group with advanced -// configuration options like disabling creation retries. This is an -// extended version of the resize method. -// -// If you increase the size of the instance group, the group creates new -// instances using the current instance template. If you decrease the -// size, the group deletes instances. The resize operation is marked -// DONE when the resize actions are scheduled even if the group has not -// yet added or deleted any instances. You must separately verify the -// status of the creating, creatingWithoutRetries, or deleting actions -// with the get or listmanagedinstances method. -func (r *InstanceGroupManagersService) ResizeAdvanced(project string, zone string, instanceGroupManager string, instancegroupmanagersresizeadvancedrequest *InstanceGroupManagersResizeAdvancedRequest) *InstanceGroupManagersResizeAdvancedCall { - c := &InstanceGroupManagersResizeAdvancedCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersresizeadvancedrequest = instancegroupmanagersresizeadvancedrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
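The resizeAdvanced variant described above might be invoked roughly as follows; the request field names (TargetSize, NoCreationRetries) are assumptions inferred from the method description, not confirmed by this diff.

package igmexample

import (
	compute "google.golang.org/api/compute/v0.beta" // assumed import path of the vendored package
)

func resizeWithoutRetries(svc *compute.Service, project, zone, igm string) error {
	req := &compute.InstanceGroupManagersResizeAdvancedRequest{
		TargetSize:        5,    // assumed field name; illustrative size
		NoCreationRetries: true, // assumed field name for "disabling creation retries"
	}
	_, err := svc.InstanceGroupManagers.ResizeAdvanced(project, zone, igm, req).Do()
	return err
}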
-func (c *InstanceGroupManagersResizeAdvancedCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeAdvancedCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersResizeAdvancedCall) Context(ctx context.Context) *InstanceGroupManagersResizeAdvancedCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupManagersResizeAdvancedCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersresizeadvancedrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.resizeAdvanced" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Resizes the managed instance group with advanced configuration options like disabling creation retries. This is an extended version of the resize method.\n\nIf you increase the size of the instance group, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. 
You must separately verify the status of the creating, creatingWithoutRetries, or deleting actions with the get or listmanagedinstances method.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.resizeAdvanced", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", - // "request": { - // "$ref": "InstanceGroupManagersResizeAdvancedRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroupManagers.setAutoHealingPolicies": - -type InstanceGroupManagersSetAutoHealingPoliciesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssetautohealingrequest *InstanceGroupManagersSetAutoHealingRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetAutoHealingPolicies: Modifies the autohealing policies. -func (r *InstanceGroupManagersService) SetAutoHealingPolicies(project string, zone string, instanceGroupManager string, instancegroupmanagerssetautohealingrequest *InstanceGroupManagersSetAutoHealingRequest) *InstanceGroupManagersSetAutoHealingPoliciesCall { - c := &InstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssetautohealingrequest = instancegroupmanagerssetautohealingrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetAutoHealingPoliciesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *InstanceGroupManagersSetAutoHealingPoliciesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetautohealingrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.setAutoHealingPolicies" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Modifies the autohealing policies.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setAutoHealingPolicies", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", - // "request": { - // "$ref": "InstanceGroupManagersSetAutoHealingRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // 
"https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroupManagers.setInstanceTemplate": - -type InstanceGroupManagersSetInstanceTemplateCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetInstanceTemplate: Specifies the instance template to use when -// creating new instances in this group. The templates for existing -// instances in the group do not change unless you recreate them. -func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { - c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setInstanceTemplate", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", - // "request": { - // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroupManagers.setTargetPools": - -type InstanceGroupManagersSetTargetPoolsCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetTargetPools: Modifies the target pools to which all instances in -// this managed instance group are assigned. The target pools -// automatically apply to all of the instances in the managed instance -// group. This operation is marked DONE when you make the request even -// if the instances have not yet been added to their target pools. The -// change might take some time to apply to all of the instances in the -// group depending on the size of the group. 
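A hedged sketch combining setInstanceTemplate and setTargetPools as documented above: existing instances keep the old template until recreated, and the target-pool change applies asynchronously. Resource URLs are illustrative, and a *compute.Service from the earlier sketch is assumed.

package igmexample

import (
	compute "google.golang.org/api/compute/v0.beta" // assumed import path of the vendored package
)

func retargetGroup(svc *compute.Service, project, zone, igm string) error {
	// Point future instances at a new template; existing instances are unchanged
	// until recreated.
	_, err := svc.InstanceGroupManagers.SetInstanceTemplate(project, zone, igm,
		&compute.InstanceGroupManagersSetInstanceTemplateRequest{
			InstanceTemplate: "global/instanceTemplates/my-new-template", // illustrative URL
		}).Do()
	if err != nil {
		return err
	}
	// Reassign the target pools; the change may take time to reach every instance.
	_, err = svc.InstanceGroupManagers.SetTargetPools(project, zone, igm,
		&compute.InstanceGroupManagersSetTargetPoolsRequest{
			TargetPools: []string{"regions/us-central1/targetPools/my-pool"}, // illustrative URL
		}).Do()
	return err
}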
-func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { - c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.setTargetPools" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setTargetPools", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", - // "request": { - // "$ref": "InstanceGroupManagersSetTargetPoolsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroupManagers.testIamPermissions": - -type InstanceGroupManagersTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InstanceGroupManagersService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceGroupManagersTestIamPermissionsCall { - c := &InstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
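The Fields option described above trims the response body to the listed JSON fields. A small sketch of requesting only the permissions array from testIamPermissions, under the same assumptions as the earlier sketch (compute v1 import path, pre-built *compute.Service); the permission string and helper name are only examples.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func checkIgmUpdatePermission(ctx context.Context, svc *compute.Service, project, zone, igm string) error {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.instanceGroupManagers.update"}, // example permission
	}
	// Ask the server to return only the "permissions" field of the response.
	resp, err := svc.InstanceGroupManagers.TestIamPermissions(project, zone, igm, req).
		Fields("permissions").Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("granted:", resp.Permissions)
	return nil
}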
-func (c *InstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *InstanceGroupManagersTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupManagersTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.testIamPermissions", - // "parameterOrder": [ - // "project", - // "zone", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.instanceGroupManagers.update": - -type InstanceGroupManagersUpdateCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is updated even if the instances in the group have not yet been -// updated. You must separately verify the status of the individual -// instances with the listManagedInstances method. -func (r *InstanceGroupManagersService) Update(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersUpdateCall { - c := &InstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. 
Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersUpdateCall) Context(ctx context.Context) *InstanceGroupManagersUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupManagersUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. 
You must separately verify the status of the individual instances with the listManagedInstances method.", - // "httpMethod": "PUT", - // "id": "compute.instanceGroupManagers.update", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", - // "request": { - // "$ref": "InstanceGroupManager" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroups.addInstances": - -type InstanceGroupsAddInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// AddInstances: Adds a list of instances to the specified instance -// group. All of the instances in the instance group must be in the same -// network/subnetwork. Read Adding instances for more information. -func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { - c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupsaddinstancesrequest = instancegroupsaddinstancesrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsAddInstancesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceGroupsAddInstancesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *InstanceGroupsAddInstancesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroups.addInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. 
Read Adding instances for more information.", - // "httpMethod": "POST", - // "id": "compute.instanceGroups.addInstances", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroup" - // ], - // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where you are adding instances.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", - // "request": { - // "$ref": "InstanceGroupsAddInstancesRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroups.aggregatedList": - -type InstanceGroupsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves the list of instance groups and sorts them -// by zone. -func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall { - c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. 
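The filter grammar spelled out above (field_name comparison_string literal_string, with RE2 literals for string fields) applies to both the list and aggregatedList calls. A small sketch of one filtered aggregated listing, under the same assumptions as the earlier sketches; the filter value is only an example, and pagination with Pages is sketched further below.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func listNonExampleGroups(ctx context.Context, svc *compute.Service, project string) error {
	// "name ne example-group": keep every instance group whose name does not
	// match the RE2 literal "example-group". Only the first page is fetched.
	agg, err := svc.InstanceGroups.AggregatedList(project).
		Filter("name ne example-group").
		MaxResults(100).
		Context(ctx).Do()
	if err != nil {
		return err
	}
	for zone, scoped := range agg.Items {
		for _, ig := range scoped.InstanceGroups {
			fmt.Println(zone, ig.Name)
		}
	}
	return nil
}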
-func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupsAggregatedListCall) OrderBy(orderBy string) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *InstanceGroupsAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *InstanceGroupsAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroups.aggregatedList" call. -// Exactly one of *InstanceGroupAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceGroupAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of instance groups and sorts them by zone.", - // "httpMethod": "GET", - // "id": "compute.instanceGroups.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/instanceGroups", - // "response": { - // "$ref": "InstanceGroupAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroups.delete": - -type InstanceGroupsDeleteCall struct { - s *Service - project string - zone string - instanceGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified instance group. The instances in the -// group are not deleted. Note that instance group must not belong to a -// backend service. Read Deleting an instance group for more -// information. 
-func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { - c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroups.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. 
Read Deleting an instance group for more information.", - // "httpMethod": "DELETE", - // "id": "compute.instanceGroups.delete", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroup" - // ], - // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group to delete.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroups.get": - -type InstanceGroupsGetCall struct { - s *Service - project string - zone string - instanceGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified instance group. Get a list of available -// instance groups by making a list() request. -func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { - c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
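The IfNoneMatch/ETag option defined above surfaces to callers as a googleapi.Error that googleapi.IsNotModified recognizes. A small sketch of a conditional Get, under the same assumptions as the earlier sketches; how the caller obtained the ETag is left out.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func refreshInstanceGroup(ctx context.Context, svc *compute.Service, project, zone, name, etag string) (*compute.InstanceGroup, error) {
	ig, err := svc.InstanceGroups.Get(project, zone, name).
		IfNoneMatch(etag). // ask the server to skip the body if the ETag still matches
		Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("instance group unchanged; keep the cached copy")
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return ig, nil
}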
-func (c *InstanceGroupsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroups.get" call. -// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *InstanceGroup.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceGroup{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified instance group. 
Get a list of available instance groups by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instanceGroups.get", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroup" - // ], - // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", - // "response": { - // "$ref": "InstanceGroup" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.instanceGroups.insert": - -type InstanceGroupsInsertCall struct { - s *Service - project string - zone string - instancegroup *InstanceGroup - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates an instance group in the specified project using the -// parameters that are included in the request. -func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { - c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instancegroup = instancegroup - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroups.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates an instance group in the specified project using the parameters that are included in the request.", - // "httpMethod": "POST", - // "id": "compute.instanceGroups.insert", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where you want to create the instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroups", - // "request": { - // "$ref": "InstanceGroup" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroups.list": - -type InstanceGroupsListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of instance groups that are located in the -// specified project and zone. -func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { - c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). 
The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupsListCall) OrderBy(orderBy string) *InstanceGroupsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroups.list" call. -// Exactly one of *InstanceGroupList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceGroupList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", - // "httpMethod": "GET", - // "id": "compute.instanceGroups.list", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroups", - // "response": { - // "$ref": "InstanceGroupList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
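Pages, whose contract is described just above, wraps the pageToken/nextPageToken loop so the callback sees every page in order. A minimal sketch over the per-zone list call, under the same assumptions as the earlier sketches.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func printAllInstanceGroups(ctx context.Context, svc *compute.Service, project, zone string) error {
	// Pages keeps calling Do and feeding NextPageToken back in until the last
	// page; returning a non-nil error from the callback halts the iteration.
	return svc.InstanceGroups.List(project, zone).MaxResults(50).
		Pages(ctx, func(page *compute.InstanceGroupList) error {
			for _, ig := range page.Items {
				fmt.Println(ig.Name)
			}
			return nil
		})
}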
-func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGroupList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroups.listInstances": - -type InstanceGroupsListInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// ListInstances: Lists the instances in the specified instance group. -func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { - c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupslistinstancesrequest = instancegroupslistinstancesrequest - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. 
By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupsListInstancesCall) OrderBy(orderBy string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *InstanceGroupsListInstancesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupsListInstancesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroups.listInstances" call. -// Exactly one of *InstanceGroupsListInstances or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupsListInstances.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupsListInstances, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceGroupsListInstances{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists the instances in the specified instance group.", - // "httpMethod": "POST", - // "id": "compute.instanceGroups.listInstances", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroup" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "instanceGroup": { - // "description": "The name of the instance group from which you want to generate a list of included instances.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", - // "request": { - // "$ref": "InstanceGroupsListInstancesRequest" - // }, - // "response": { - // "$ref": "InstanceGroupsListInstances" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupsListInstances) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroups.removeInstances": - -type InstanceGroupsRemoveInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// RemoveInstances: Removes one or more instances from the specified -// instance group, but does not delete those instances. -func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { - c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupsremoveinstancesrequest = instancegroupsremoveinstancesrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsRemoveInstancesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *InstanceGroupsRemoveInstancesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroups.removeInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Removes one or more instances from the specified instance group, but does not delete those instances.", - // "httpMethod": "POST", - // "id": "compute.instanceGroups.removeInstances", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroup" - // ], - // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where the specified instances will be removed.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", - // "request": { - // "$ref": "InstanceGroupsRemoveInstancesRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroups.setNamedPorts": - -type InstanceGroupsSetNamedPortsCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetNamedPorts: Sets the named ports for the specified instance group. -func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { - c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupssetnamedportsrequest = instancegroupssetnamedportsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *InstanceGroupsSetNamedPortsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *InstanceGroupsSetNamedPortsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroups.setNamedPorts" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the named ports for the specified instance group.", - // "httpMethod": "POST", - // "id": "compute.instanceGroups.setNamedPorts", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroup" - // ], - // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where the named ports are updated.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", - // "request": { - // "$ref": "InstanceGroupsSetNamedPortsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroups.testIamPermissions": - -type InstanceGroupsTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InstanceGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceGroupsTestIamPermissionsCall { - c := &InstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceGroupsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *InstanceGroupsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *InstanceGroupsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroups.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.instanceGroups.testIamPermissions", - // "parameterOrder": [ - // "project", - // "zone", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // 
"https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.instanceTemplates.delete": - -type InstanceTemplatesDeleteCall struct { - s *Service - project string - instanceTemplate string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified instance template. If you delete an -// instance template that is being referenced from another instance -// group, the instance group will not be able to create or recreate -// virtual machine instances. Deleting an instance template is permanent -// and cannot be undone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/delete -func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { - c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.instanceTemplate = instanceTemplate - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemplatesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceTemplatesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "instanceTemplate": c.instanceTemplate, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceTemplates.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified instance template. If you delete an instance template that is being referenced from another instance group, the instance group will not be able to create or recreate virtual machine instances. Deleting an instance template is permanent and cannot be undone.", - // "httpMethod": "DELETE", - // "id": "compute.instanceTemplates.delete", - // "parameterOrder": [ - // "project", - // "instanceTemplate" - // ], - // "parameters": { - // "instanceTemplate": { - // "description": "The name of the instance template to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/instanceTemplates/{instanceTemplate}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceTemplates.get": - -type InstanceTemplatesGetCall struct { - s *Service - project string - instanceTemplate string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified instance template. Get a list of available -// instance templates by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/get -func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { - c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.instanceTemplate = instanceTemplate - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. 
Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplatesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceTemplatesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "instanceTemplate": c.instanceTemplate, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceTemplates.get" call. -// Exactly one of *InstanceTemplate or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceTemplate.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceTemplate{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified instance template. 
Get a list of available instance templates by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instanceTemplates.get", - // "parameterOrder": [ - // "project", - // "instanceTemplate" - // ], - // "parameters": { - // "instanceTemplate": { - // "description": "The name of the instance template.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/instanceTemplates/{instanceTemplate}", - // "response": { - // "$ref": "InstanceTemplate" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.instanceTemplates.insert": - -type InstanceTemplatesInsertCall struct { - s *Service - project string - instancetemplate *InstanceTemplate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates an instance template in the specified project using -// the data that is included in the request. If you are creating a new -// template to update an existing instance group, your new instance -// template must use the same network or, if applicable, the same -// subnetwork as the original template. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/insert -func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { - c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.instancetemplate = instancetemplate - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemplatesInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceTemplatesInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceTemplates.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", - // "httpMethod": "POST", - // "id": "compute.instanceTemplates.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/instanceTemplates", - // "request": { - // "$ref": "InstanceTemplate" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceTemplates.list": - -type InstanceTemplatesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of instance templates that are contained -// within the specified project and zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/list -func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { - c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). 
The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceTemplatesListCall) OrderBy(orderBy string) *InstanceTemplatesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTemplatesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTemplatesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceTemplatesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceTemplates.list" call. -// Exactly one of *InstanceTemplateList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceTemplateList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceTemplateList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of instance templates that are contained within the specified project and zone.", - // "httpMethod": "GET", - // "id": "compute.instanceTemplates.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/instanceTemplates", - // "response": { - // "$ref": "InstanceTemplateList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceTemplates.testIamPermissions": - -type InstanceTemplatesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InstanceTemplatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceTemplatesTestIamPermissionsCall { - c := &InstanceTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceTemplatesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceTemplatesTestIamPermissionsCall) Context(ctx context.Context) *InstanceTemplatesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceTemplates.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.instanceTemplates.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/instanceTemplates/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.instances.addAccessConfig": - -type InstancesAddAccessConfigCall struct { - s *Service - project string - zone string - instance string - accessconfig *AccessConfig - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// AddAccessConfig: Adds an access config to an instance's network -// interface. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/addAccessConfig -func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { - c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.accessconfig = accessconfig - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAddAccessConfigCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesAddAccessConfigCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.addAccessConfig" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Adds an access config to an instance's network interface.", - // "httpMethod": "POST", - // "id": "compute.instances.addAccessConfig", - // "parameterOrder": [ - // "project", - // "zone", - // "instance", - // "networkInterface" - // ], - // "parameters": { - // "instance": { - // "description": "The instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "networkInterface": { - // "description": "The name of the network interface to add to this instance.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", - // "request": { - // "$ref": "AccessConfig" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.aggregatedList": - -type InstancesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves aggregated list of instances. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/aggregatedList -func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { - c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. 
-// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstancesAggregatedListCall) OrderBy(orderBy string) *InstancesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
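A sketch of consuming the aggregated listing across all zones, assuming InstanceAggregatedList.Items is the usual zone-keyed map of InstancesScopedList values from the upstream compute/v1 API (an assumption; the vendored snapshot may differ) and that svc is an already-constructed *compute.Service.

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listAllInstances is a hypothetical helper that walks every zone via the
// aggregated listing and prints running instances.
func listAllInstances(ctx context.Context, svc *compute.Service, project string) error {
	return svc.Instances.AggregatedList(project).
		Filter("status eq RUNNING").
		Pages(ctx, func(page *compute.InstanceAggregatedList) error {
			for zone, scoped := range page.Items {
				for _, inst := range scoped.Instances {
					fmt.Println(zone, inst.Name)
				}
			}
			return nil
		})
}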
-func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.aggregatedList" call. -// Exactly one of *InstanceAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves aggregated list of instances.", - // "httpMethod": "GET", - // "id": "compute.instances.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/instances", - // "response": { - // "$ref": "InstanceAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instances.attachDisk": - -type InstancesAttachDiskCall struct { - s *Service - project string - zone string - instance string - attacheddisk *AttachedDisk - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// AttachDisk: Attaches a Disk resource to an instance. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/attachDisk -func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { - c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.attacheddisk = attacheddisk - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachDiskCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesAttachDiskCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.attachDisk" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Attaches a Disk resource to an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.attachDisk", - // "parameterOrder": [ - // "project", - // "zone", - // "instance" - // ], - // "parameters": { - // "instance": { - // "description": "The instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", - // "request": { - // "$ref": "AttachedDisk" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.delete": - -type InstancesDeleteCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified Instance resource. For more -// information, see Stopping or Deleting an Instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete -func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { - c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
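A minimal sketch of the delete call just documented, again assuming an existing *compute.Service; note that the returned Operation is asynchronous, so the instance is only gone once that operation completes.

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// deleteInstance is a hypothetical helper around the Delete call. The returned
// Operation is asynchronous: the instance is removed only once the operation
// reaches status DONE (typically polled via the zone operations API).
func deleteInstance(ctx context.Context, svc *compute.Service, project, zone, name string) error {
	op, err := svc.Instances.Delete(project, zone, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("delete started: operation=%s status=%s\n", op.Name, op.Status)
	return nil
}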
-func (c *InstancesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified Instance resource. 
For more information, see Stopping or Deleting an Instance.", - // "httpMethod": "DELETE", - // "id": "compute.instances.delete", - // "parameterOrder": [ - // "project", - // "zone", - // "instance" - // ], - // "parameters": { - // "instance": { - // "description": "Name of the instance resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.deleteAccessConfig": - -type InstancesDeleteAccessConfigCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// DeleteAccessConfig: Deletes an access config from an instance's -// network interface. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig -func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { - c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("accessConfig", accessConfig) - c.urlParams_.Set("networkInterface", networkInterface) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesDeleteAccessConfigCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.deleteAccessConfig" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes an access config from an instance's network interface.", - // "httpMethod": "POST", - // "id": "compute.instances.deleteAccessConfig", - // "parameterOrder": [ - // "project", - // "zone", - // "instance", - // "accessConfig", - // "networkInterface" - // ], - // "parameters": { - // "accessConfig": { - // "description": "The name of the access config to delete.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "instance": { - // "description": "The instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "networkInterface": { - // "description": "The name of the network interface.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.detachDisk": - -type InstancesDetachDiskCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// DetachDisk: Detaches a disk from an instance. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/detachDisk -func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { - c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("deviceName", deviceName) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachDiskCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesDetachDiskCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.detachDisk" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Detaches a disk from an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.detachDisk", - // "parameterOrder": [ - // "project", - // "zone", - // "instance", - // "deviceName" - // ], - // "parameters": { - // "deviceName": { - // "description": "Disk device name to detach.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "instance": { - // "description": "Instance name.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.get": - -type InstancesGetCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified Instance resource. Get a list of available -// instances by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/get -func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { - c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. 
Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.get" call. -// Exactly one of *Instance or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Instance.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Instance{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified Instance resource. 
Get a list of available instances by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instances.get", - // "parameterOrder": [ - // "project", - // "zone", - // "instance" - // ], - // "parameters": { - // "instance": { - // "description": "Name of the instance resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}", - // "response": { - // "$ref": "Instance" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.instances.getSerialPortOutput": - -type InstancesGetSerialPortOutputCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetSerialPortOutput: Returns the specified instance's serial port -// output. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/getSerialPortOutput -func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { - c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - return c -} - -// Port sets the optional parameter "port": Specifies which COM or -// serial port to retrieve data from. -func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialPortOutputCall { - c.urlParams_.Set("port", fmt.Sprint(port)) - return c -} - -// Start sets the optional parameter "start": Returns output starting -// from a specific byte position. Use this to page through output when -// the output is too large to return in a single request. For the -// initial request, leave this field unspecified. For subsequent calls, -// this field should be set to the next value returned in the previous -// call. -func (c *InstancesGetSerialPortOutputCall) Start(start int64) *InstancesGetSerialPortOutputCall { - c.urlParams_.Set("start", fmt.Sprint(start)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
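A sketch of paging through console output with the start parameter described above, assuming the SerialPortOutput struct exposes Contents and Next as in the upstream compute/v1 API (an assumption) and an existing *compute.Service.

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// tailSerialPort is a hypothetical helper that reads serial port 1 in chunks,
// resuming each request from the byte position returned by the previous one.
func tailSerialPort(ctx context.Context, svc *compute.Service, project, zone, instance string) error {
	var start int64
	for {
		out, err := svc.Instances.GetSerialPortOutput(project, zone, instance).
			Port(1).
			Start(start).
			Context(ctx).
			Do()
		if err != nil {
			return err
		}
		if out.Contents == "" {
			return nil // no new output since the last byte position
		}
		fmt.Print(out.Contents)
		start = out.Next // resume from the next byte position
	}
}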
-func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *InstancesGetSerialPortOutputCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *InstancesGetSerialPortOutputCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesGetSerialPortOutputCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.getSerialPortOutput" call. -// Exactly one of *SerialPortOutput or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *SerialPortOutput.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*SerialPortOutput, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &SerialPortOutput{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified instance's serial port output.", - // "httpMethod": "GET", - // "id": "compute.instances.getSerialPortOutput", - // "parameterOrder": [ - // "project", - // "zone", - // "instance" - // ], - // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "port": { - // "default": "1", - // "description": "Specifies which COM or serial port to retrieve data from.", - // "format": "int32", - // "location": "query", - // "maximum": "4", - // "minimum": "1", - // "type": "integer" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "start": { - // "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. For subsequent calls, this field should be set to the next value returned in the previous call.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}/serialPort", - // "response": { - // "$ref": "SerialPortOutput" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.instances.insert": - -type InstancesInsertCall struct { - s *Service - project string - zone string - instance *Instance - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates an instance resource in the specified project using -// the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/insert -func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { - c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
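A sketch of the request body the insert call expects, assuming the usual upstream compute/v1 field names for Instance, AttachedDisk, and NetworkInterface; the machine type, image, and network values are placeholders, and svc is an existing *compute.Service.

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// createInstance is a hypothetical helper showing a minimal Instance body:
// a boot disk initialized from an image and a single default network interface.
func createInstance(ctx context.Context, svc *compute.Service, project, zone string) error {
	inst := &compute.Instance{
		Name:        "example-instance",
		MachineType: fmt.Sprintf("zones/%s/machineTypes/n1-standard-1", zone),
		Disks: []*compute.AttachedDisk{{
			Boot:       true,
			AutoDelete: true,
			InitializeParams: &compute.AttachedDiskInitializeParams{
				SourceImage: "projects/debian-cloud/global/images/family/debian-9",
			},
		}},
		NetworkInterfaces: []*compute.NetworkInterface{{
			Network: "global/networks/default",
		}},
	}
	op, err := svc.Instances.Insert(project, zone, inst).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("insert operation:", op.Name)
	return nil
}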
-func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates an instance resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.insert", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances", - // "request": { - // "$ref": "Instance" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.list": - -type InstancesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of instances contained within the specified -// zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/list -func (r *InstancesService) List(project string, zone string) *InstancesListCall { - c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. 
-// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstancesListCall) Filter(filter string) *InstancesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstancesListCall) OrderBy(orderBy string) *InstancesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
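A sketch of driving the zonal list call by hand with PageToken/NextPageToken instead of the Pages helper, which can be useful when each page should be processed and released independently; svc and the filter value are assumptions, and the filter string follows the eq/ne syntax documented above.

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listZoneInstances is a hypothetical helper that pages through instances in
// one zone, matching names against an RE2 pattern as described in the filter docs.
func listZoneInstances(ctx context.Context, svc *compute.Service, project, zone string) error {
	call := svc.Instances.List(project, zone).
		Filter("name eq web-.*").
		MaxResults(250)
	for {
		page, err := call.Context(ctx).Do()
		if err != nil {
			return err
		}
		for _, inst := range page.Items {
			fmt.Println(inst.Name, inst.Status)
		}
		if page.NextPageToken == "" {
			return nil
		}
		call.PageToken(page.NextPageToken)
	}
}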
-func (c *InstancesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.list" call. -// Exactly one of *InstanceList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *InstanceList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of instances contained within the specified zone.", - // "httpMethod": "GET", - // "id": "compute.instances.list", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances", - // "response": { - // "$ref": "InstanceList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instances.listReferrers": - -type InstancesListReferrersCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// ListReferrers: Retrieves the list of referrers to instances contained -// within the specified zone. 
-func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { - c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferrersCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstancesListReferrersCall) MaxResults(maxResults int64) *InstancesListReferrersCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstancesListReferrersCall) OrderBy(orderBy string) *InstancesListReferrersCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListReferrersCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesListReferrersCall) Fields(s ...googleapi.Field) *InstancesListReferrersCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesListReferrersCall) IfNoneMatch(entityTag string) *InstancesListReferrersCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesListReferrersCall) Context(ctx context.Context) *InstancesListReferrersCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesListReferrersCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/referrers") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.listReferrers" call. -// Exactly one of *InstanceListReferrers or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceListReferrers.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*InstanceListReferrers, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceListReferrers{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of referrers to instances contained within the specified zone.", - // "httpMethod": "GET", - // "id": "compute.instances.listReferrers", - // "parameterOrder": [ - // "project", - // "zone", - // "instance" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "instance": { - // "description": "Name of the target instance scoping this request, or '-' if the request should span over all instances in the container.", - // "location": "path", - // "pattern": "-|[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}/referrers", - // "response": { - // "$ref": "InstanceListReferrers" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstancesListReferrersCall) Pages(ctx context.Context, f func(*InstanceListReferrers) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instances.reset": - -type InstancesResetCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Reset: Performs a reset on the instance. For more information, see -// Resetting an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/reset -func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { - c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *InstancesResetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.reset" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Performs a reset on the instance. 
For more information, see Resetting an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.reset", - // "parameterOrder": [ - // "project", - // "zone", - // "instance" - // ], - // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}/reset", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.setDiskAutoDelete": - -type InstancesSetDiskAutoDeleteCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to -// an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setDiskAutoDelete -func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { - c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("autoDelete", fmt.Sprint(autoDelete)) - c.urlParams_.Set("deviceName", deviceName) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *InstancesSetDiskAutoDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.setDiskAutoDelete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the auto-delete flag for a disk attached to an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.setDiskAutoDelete", - // "parameterOrder": [ - // "project", - // "zone", - // "instance", - // "autoDelete", - // "deviceName" - // ], - // "parameters": { - // "autoDelete": { - // "description": "Whether to auto-delete the disk when the instance is deleted.", - // "location": "query", - // "required": true, - // "type": "boolean" - // }, - // "deviceName": { - // "description": "The device name of the disk to modify.", - // "location": "query", - // "pattern": "\\w[\\w.-]{0,254}", - // "required": true, - // "type": "string" - // }, - // "instance": { - // "description": "The instance name.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.setLabels": - -type InstancesSetLabelsCall struct { - s *Service - project string - zone string - instance string - instancessetlabelsrequest *InstancesSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetLabels: Sets labels on an instance. 
To learn more about labels, -// read the Labeling or Tagging Resources documentation. -func (r *InstancesService) SetLabels(project string, zone string, instance string, instancessetlabelsrequest *InstancesSetLabelsRequest) *InstancesSetLabelsCall { - c := &InstancesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.instancessetlabelsrequest = instancessetlabelsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabelsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesSetLabelsCall) Context(ctx context.Context) *InstancesSetLabelsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesSetLabelsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetlabelsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setLabels") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets labels on an instance. 
To learn more about labels, read the Labeling or Tagging Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.instances.setLabels", - // "parameterOrder": [ - // "project", - // "zone", - // "instance" - // ], - // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setLabels", - // "request": { - // "$ref": "InstancesSetLabelsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.setMachineResources": - -type InstancesSetMachineResourcesCall struct { - s *Service - project string - zone string - instance string - instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetMachineResources: Changes the number and/or type of accelerator -// for a stopped instance to the values specified in the request. -func (r *InstancesService) SetMachineResources(project string, zone string, instance string, instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest) *InstancesSetMachineResourcesCall { - c := &InstancesSetMachineResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.instancessetmachineresourcesrequest = instancessetmachineresourcesrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *InstancesSetMachineResourcesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesSetMachineResourcesCall) Context(ctx context.Context) *InstancesSetMachineResourcesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *InstancesSetMachineResourcesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachineresourcesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineResources") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.setMachineResources" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.setMachineResources", - // "parameterOrder": [ - // "project", - // "zone", - // "instance" - // ], - // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMachineResources", - // "request": { - // "$ref": "InstancesSetMachineResourcesRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // 
"https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.setMachineType": - -type InstancesSetMachineTypeCall struct { - s *Service - project string - zone string - instance string - instancessetmachinetyperequest *InstancesSetMachineTypeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetMachineType: Changes the machine type for a stopped instance to -// the machine type specified in the request. -func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { - c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.instancessetmachinetyperequest = instancessetmachinetyperequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSetMachineTypeCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesSetMachineTypeCall) Context(ctx context.Context) *InstancesSetMachineTypeCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesSetMachineTypeCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachinetyperequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.setMachineType" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.setMachineType", - // "parameterOrder": [ - // "project", - // "zone", - // "instance" - // ], - // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", - // "request": { - // "$ref": "InstancesSetMachineTypeRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.setMetadata": - -type InstancesSetMetadataCall struct { - s *Service - project string - zone string - instance string - metadata *Metadata - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetMetadata: Sets metadata for the specified instance to the data -// included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setMetadata -func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { - c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.metadata = metadata - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMetadataCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *InstancesSetMetadataCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.setMetadata" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets metadata for the specified instance to the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.setMetadata", - // "parameterOrder": [ - // "project", - // "zone", - // "instance" - // ], - // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", - // "request": { - // "$ref": "Metadata" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.setScheduling": - -type 
InstancesSetSchedulingCall struct { - s *Service - project string - zone string - instance string - scheduling *Scheduling - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetScheduling: Sets an instance's scheduling options. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling -func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { - c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.scheduling = scheduling - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesSetSchedulingCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.setScheduling" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets an instance's scheduling options.", - // "httpMethod": "POST", - // "id": "compute.instances.setScheduling", - // "parameterOrder": [ - // "project", - // "zone", - // "instance" - // ], - // "parameters": { - // "instance": { - // "description": "Instance name.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", - // "request": { - // "$ref": "Scheduling" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.setServiceAccount": - -type InstancesSetServiceAccountCall struct { - s *Service - project string - zone string - instance string - instancessetserviceaccountrequest *InstancesSetServiceAccountRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetServiceAccount: Sets the service account on the instance. For more -// information, read Changing the service account and access scopes for -// an instance. -func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { - c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.instancessetserviceaccountrequest = instancessetserviceaccountrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *InstancesSetServiceAccountCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setServiceAccount") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.setServiceAccount" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the service account on the instance. 
For more information, read Changing the service account and access scopes for an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.setServiceAccount", - // "parameterOrder": [ - // "project", - // "zone", - // "instance" - // ], - // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setServiceAccount", - // "request": { - // "$ref": "InstancesSetServiceAccountRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.setTags": - -type InstancesSetTagsCall struct { - s *Service - project string - zone string - instance string - tags *Tags - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetTags: Sets tags for the specified instance to the data included in -// the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setTags -func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { - c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.tags = tags - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesSetTagsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.setTags" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets tags for the specified instance to the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.setTags", - // "parameterOrder": [ - // "project", - // "zone", - // "instance" - // ], - // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setTags", - // "request": { - // "$ref": "Tags" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.start": - -type InstancesStartCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Start: Starts an instance that was stopped using the using the -// instances().stop method. For more information, see Restart an -// instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/start -func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { - c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesStartCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.start" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Starts an instance that was stopped using the using the instances().stop method. 
For more information, see Restart an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.start", - // "parameterOrder": [ - // "project", - // "zone", - // "instance" - // ], - // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}/start", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.startWithEncryptionKey": - -type InstancesStartWithEncryptionKeyCall struct { - s *Service - project string - zone string - instance string - instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// StartWithEncryptionKey: Starts an instance that was stopped using the -// using the instances().stop method. For more information, see Restart -// an instance. -func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { - c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
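Building on the previous sketch, the start call documented above returns an Operation that callers typically poll through ZoneOperations until it reports DONE. A hedged sketch, assuming the same compute/v1 import (plus context, fmt, and time) and the svc value from the earlier example; the fixed two-second sleep is purely illustrative.

// Sketch: start a stopped instance and wait for the zone-scoped operation to finish.
func startAndWait(ctx context.Context, svc *compute.Service, project, zone, instance string) error {
	op, err := svc.Instances.Start(project, zone, instance).Context(ctx).Do()
	if err != nil {
		return err
	}
	// Poll until Compute Engine marks the operation DONE (no backoff, illustration only).
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		op, err = svc.ZoneOperations.Get(project, zone, op.Name).Context(ctx).Do()
		if err != nil {
			return err
		}
	}
	if op.Error != nil {
		return fmt.Errorf("start of %s failed: %+v", instance, op.Error.Errors)
	}
	return nil
}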
-func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.startWithEncryptionKey" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Starts an instance that was stopped using the using the instances().stop method. 
For more information, see Restart an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.startWithEncryptionKey", - // "parameterOrder": [ - // "project", - // "zone", - // "instance" - // ], - // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", - // "request": { - // "$ref": "InstancesStartWithEncryptionKeyRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.stop": - -type InstancesStopCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Stop: Stops a running instance, shutting it down cleanly, and allows -// you to restart the instance at a later time. Stopped instances do not -// incur per-minute, virtual machine usage charges while they are -// stopped, but any resources that the virtual machine is using, such as -// persistent disks and static IP addresses, will continue to be charged -// until they are deleted. For more information, see Stopping an -// instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop -func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { - c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesStopCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.stop" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.stop", - // "parameterOrder": [ - // "project", - // "zone", - // "instance" - // ], - // "parameters": { - // "instance": { - // "description": "Name of the instance resource to stop.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{instance}/stop", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.testIamPermissions": - -type InstancesTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
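The testIamPermissions call introduced here answers which of a set of requested permissions the caller actually holds on an instance. A small sketch under the same assumptions as the earlier examples (svc value, placeholder names); the two permission strings are examples of the documented compute.instances.* form.

// Sketch: check whether the caller may start and stop a given instance.
func canManageInstance(ctx context.Context, svc *compute.Service, project, zone, instance string) (bool, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.instances.start", "compute.instances.stop"},
	}
	resp, err := svc.Instances.TestIamPermissions(project, zone, instance, req).Context(ctx).Do()
	if err != nil {
		return false, err
	}
	granted := make(map[string]bool, len(resp.Permissions))
	for _, p := range resp.Permissions {
		granted[p] = true
	}
	return granted["compute.instances.start"] && granted["compute.instances.stop"], nil
}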
-func (r *InstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstancesTestIamPermissionsCall { - c := &InstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstancesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesTestIamPermissionsCall) Context(ctx context.Context) *InstancesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.instances.testIamPermissions", - // "parameterOrder": [ - // "project", - // "zone", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instances/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.licenses.get": - -type LicensesGetCall struct { - s *Service - project string - license string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified License resource. Get a list of available -// licenses by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/licenses/get -func (r *LicensesService) Get(project string, license string) *LicensesGetCall { - c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.license = license - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. 
Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *LicensesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "license": c.license, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.licenses.get" call. -// Exactly one of *License or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *License.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &License{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified License resource. 
Get a list of available licenses by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.licenses.get", - // "parameterOrder": [ - // "project", - // "license" - // ], - // "parameters": { - // "license": { - // "description": "Name of the License resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/licenses/{license}", - // "response": { - // "$ref": "License" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.machineTypes.aggregatedList": - -type MachineTypesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of machine types. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/aggregatedList -func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { - c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *MachineTypesAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.machineTypes.aggregatedList" call. -// Exactly one of *MachineTypeAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *MachineTypeAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &MachineTypeAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an aggregated list of machine types.", - // "httpMethod": "GET", - // "id": "compute.machineTypes.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/machineTypes", - // "response": { - // "$ref": "MachineTypeAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.machineTypes.get": - -type MachineTypesGetCall struct { - s *Service - project string - zone string - machineType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified machine type. Get a list of available -// machine types by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/get -func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { - c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.machineType = machineType - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *MachineTypesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "machineType": c.machineType, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.machineTypes.get" call. -// Exactly one of *MachineType or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *MachineType.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &MachineType{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified machine type. 
Get a list of available machine types by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.machineTypes.get", - // "parameterOrder": [ - // "project", - // "zone", - // "machineType" - // ], - // "parameters": { - // "machineType": { - // "description": "Name of the machine type to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/machineTypes/{machineType}", - // "response": { - // "$ref": "MachineType" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.machineTypes.list": - -type MachineTypesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of machine types available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/list -func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { - c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. 
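The filter grammar spelled out above, together with the Pages helper defined further down for MachineTypesListCall, is usually consumed along these lines. Same assumptions as the earlier sketches; the regex-style name filter is a placeholder, and the Items/Name fields follow the generated MachineTypeList and MachineType types.

// Sketch: collect the names of n1-standard machine types in one zone, page by page.
func listStandardTypes(ctx context.Context, svc *compute.Service, project, zone string) ([]string, error) {
	var names []string
	err := svc.MachineTypes.List(project, zone).
		Filter("name eq n1-standard-.*"). // documented form: field_name eq literal, RE2 for string fields
		Pages(ctx, func(page *compute.MachineTypeList) error {
			for _, mt := range page.Items {
				names = append(names, mt.Name)
			}
			return nil
		})
	return names, err
}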
-func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *MachineTypesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.machineTypes.list" call. -// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *MachineTypeList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &MachineTypeList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of machine types available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.machineTypes.list", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/machineTypes", - // "response": { - // "$ref": "MachineTypeList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.networks.addPeering": - -type NetworksAddPeeringCall struct { - s *Service - project string - network string - networksaddpeeringrequest *NetworksAddPeeringRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// AddPeering: Adds a peering to the specified network. -func (r *NetworksService) AddPeering(project string, network string, networksaddpeeringrequest *NetworksAddPeeringRequest) *NetworksAddPeeringCall { - c := &NetworksAddPeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.network = network - c.networksaddpeeringrequest = networksaddpeeringrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NetworksAddPeeringCall) Fields(s ...googleapi.Field) *NetworksAddPeeringCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *NetworksAddPeeringCall) Context(ctx context.Context) *NetworksAddPeeringCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *NetworksAddPeeringCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksaddpeeringrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/addPeering") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.networks.addPeering" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Adds a peering to the specified network.", - // "httpMethod": "POST", - // "id": "compute.networks.addPeering", - // "parameterOrder": [ - // "project", - // "network" - // ], - // "parameters": { - // "network": { - // "description": "Name of the network resource to add peering to.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/networks/{network}/addPeering", - // "request": { - // "$ref": "NetworksAddPeeringRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.networks.delete": - -type NetworksDeleteCall struct { - s *Service - project 
string - network string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified network. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/delete -func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall { - c := &NetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.network = network - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *NetworksDeleteCall) Context(ctx context.Context) *NetworksDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *NetworksDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.networks.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified network.", - // "httpMethod": "DELETE", - // "id": "compute.networks.delete", - // "parameterOrder": [ - // "project", - // "network" - // ], - // "parameters": { - // "network": { - // "description": "Name of the network to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/networks/{network}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.networks.get": - -type NetworksGetCall struct { - s *Service - project string - network string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified network. Get a list of available networks -// by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/get -func (r *NetworksService) Get(project string, network string) *NetworksGetCall { - c := &NetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.network = network - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *NetworksGetCall) Context(ctx context.Context) *NetworksGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *NetworksGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.networks.get" call. -// Exactly one of *Network or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Network.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Network{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified network. Get a list of available networks by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.networks.get", - // "parameterOrder": [ - // "project", - // "network" - // ], - // "parameters": { - // "network": { - // "description": "Name of the network to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/networks/{network}", - // "response": { - // "$ref": "Network" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.networks.insert": - -type NetworksInsertCall struct { - s *Service - project string - network *Network - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a network in the specified project using the data -// included in the request. 
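The IfNoneMatch comments above describe a conditional GET: send the ETag seen on a previous response and treat http.StatusNotModified as "still current". A small illustrative helper, reusing the svc client from the sketch above plus the google.golang.org/api/googleapi package; the lastETag bookkeeping is hypothetical:

// getNetworkIfChanged refetches a network only when its ETag has changed.
func getNetworkIfChanged(ctx context.Context, svc *compute.Service, project, name, lastETag string) (*compute.Network, string, error) {
	net, err := svc.Networks.Get(project, name).
		IfNoneMatch(lastETag). // sends If-None-Match with the previously seen ETag
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// 304 Not Modified: the copy identified by lastETag is still current.
		return nil, lastETag, nil
	}
	if err != nil {
		return nil, lastETag, err
	}
	// A fresh object came back; remember its ETag for the next call.
	return net, net.ServerResponse.Header.Get("Etag"), nil
}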
-// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/insert -func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { - c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.network = network - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *NetworksInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.networks.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a network in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.networks.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/networks", - // "request": { - // "$ref": "Network" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.networks.list": - -type NetworksListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of networks available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/list -func (r *NetworksService) List(project string) *NetworksListCall { - c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. 
-func (c *NetworksListCall) Filter(filter string) *NetworksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *NetworksListCall) OrderBy(orderBy string) *NetworksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *NetworksListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.networks.list" call. -// Exactly one of *NetworkList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *NetworkList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &NetworkList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of networks available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.networks.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/networks", - // "response": { - // "$ref": "NetworkList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.networks.removePeering": - -type NetworksRemovePeeringCall struct { - s *Service - project string - network string - networksremovepeeringrequest *NetworksRemovePeeringRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// RemovePeering: Removes a peering from the specified network. -func (r *NetworksService) RemovePeering(project string, network string, networksremovepeeringrequest *NetworksRemovePeeringRequest) *NetworksRemovePeeringCall { - c := &NetworksRemovePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.network = network - c.networksremovepeeringrequest = networksremovepeeringrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NetworksRemovePeeringCall) Fields(s ...googleapi.Field) *NetworksRemovePeeringCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *NetworksRemovePeeringCall) Context(ctx context.Context) *NetworksRemovePeeringCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *NetworksRemovePeeringCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksremovepeeringrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/removePeering") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.networks.removePeering" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Removes a peering from the specified network.", - // "httpMethod": "POST", - // "id": "compute.networks.removePeering", - // "parameterOrder": [ - // "project", - // "network" - // ], - // "parameters": { - // "network": { - // "description": "Name of the network resource to remove peering from.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/networks/{network}/removePeering", - // "request": { - // "$ref": "NetworksRemovePeeringRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.networks.switchToCustomMode": - -type NetworksSwitchToCustomModeCall struct { - s *Service - project string - network string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SwitchToCustomMode: Switches the network mode from auto subnet mode -// to custom subnet mode. -func (r *NetworksService) SwitchToCustomMode(project string, network string) *NetworksSwitchToCustomModeCall { - c := &NetworksSwitchToCustomModeCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.network = network - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NetworksSwitchToCustomModeCall) Fields(s ...googleapi.Field) *NetworksSwitchToCustomModeCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *NetworksSwitchToCustomModeCall) Context(ctx context.Context) *NetworksSwitchToCustomModeCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
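AddPeering and RemovePeering, removed above, take small JSON request bodies and return an Operation. A hedged sketch, reusing the svc client from the first sketch; the request field names follow the upstream compute API and are assumptions, since the struct definitions are not part of this hunk:

func peerNetworks(ctx context.Context, svc *compute.Service, project string) error {
	// Peering and network names here are placeholders.
	_, err := svc.Networks.AddPeering(project, "net-a", &compute.NetworksAddPeeringRequest{
		Name:             "net-a-to-net-b",
		PeerNetwork:      "projects/other-project/global/networks/net-b",
		AutoCreateRoutes: true,
	}).Context(ctx).Do()
	return err
}

func unpeerNetworks(ctx context.Context, svc *compute.Service, project string) error {
	_, err := svc.Networks.RemovePeering(project, "net-a", &compute.NetworksRemovePeeringRequest{
		Name: "net-a-to-net-b",
	}).Context(ctx).Do()
	return err
}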
-func (c *NetworksSwitchToCustomModeCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/switchToCustomMode") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.networks.switchToCustomMode" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Switches the network mode from auto subnet mode to custom subnet mode.", - // "httpMethod": "POST", - // "id": "compute.networks.switchToCustomMode", - // "parameterOrder": [ - // "project", - // "network" - // ], - // "parameters": { - // "network": { - // "description": "Name of the network to be updated.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/networks/{network}/switchToCustomMode", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.networks.testIamPermissions": - -type NetworksTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-func (r *NetworksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworksTestIamPermissionsCall { - c := &NetworksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NetworksTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworksTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *NetworksTestIamPermissionsCall) Context(ctx context.Context) *NetworksTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *NetworksTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.networks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.networks.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/networks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.projects.disableXpnHost": - -type ProjectsDisableXpnHostCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// DisableXpnHost: Disable this project as an XPN host project. -func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHostCall { - c := &ProjectsDisableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsDisableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnHostCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsDisableXpnHostCall) Context(ctx context.Context) *ProjectsDisableXpnHostCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsDisableXpnHostCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnHost") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.projects.disableXpnHost" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Disable this project as an XPN host project.", - // "httpMethod": "POST", - // "id": "compute.projects.disableXpnHost", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/disableXpnHost", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.projects.disableXpnResource": - -type ProjectsDisableXpnResourceCall struct { - s *Service - project string - projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// DisableXpnResource: Disable an XPN resource associated with this host -// project. -func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest) *ProjectsDisableXpnResourceCall { - c := &ProjectsDisableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.projectsdisablexpnresourcerequest = projectsdisablexpnresourcerequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsDisableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnResourceCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *ProjectsDisableXpnResourceCall) Context(ctx context.Context) *ProjectsDisableXpnResourceCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsDisableXpnResourceCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsdisablexpnresourcerequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnResource") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.projects.disableXpnResource" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Disable an XPN resource associated with this host project.", - // "httpMethod": "POST", - // "id": "compute.projects.disableXpnResource", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/disableXpnResource", - // "request": { - // "$ref": "ProjectsDisableXpnResourceRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.projects.enableXpnHost": - -type ProjectsEnableXpnHostCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// EnableXpnHost: Enable this project as an XPN host project. 
-func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCall { - c := &ProjectsEnableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsEnableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnHostCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsEnableXpnHostCall) Context(ctx context.Context) *ProjectsEnableXpnHostCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsEnableXpnHostCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnHost") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.projects.enableXpnHost" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Enable this project as an XPN host project.", - // "httpMethod": "POST", - // "id": "compute.projects.enableXpnHost", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/enableXpnHost", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.projects.enableXpnResource": - -type ProjectsEnableXpnResourceCall struct { - s *Service - project string - projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// EnableXpnResource: Enable XPN resource (a.k.a service project or -// service folder in the future) for a host project, so that subnetworks -// in the host project can be used by instances in the service project -// or folder. -func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest) *ProjectsEnableXpnResourceCall { - c := &ProjectsEnableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.projectsenablexpnresourcerequest = projectsenablexpnresourcerequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsEnableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnResourceCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsEnableXpnResourceCall) Context(ctx context.Context) *ProjectsEnableXpnResourceCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
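EnableXpnHost and DisableXpnHost, both deleted above, take no request body and return an Operation that still has to be polled for completion. A minimal sketch, reusing the svc client from the first sketch:

func setXpnHost(ctx context.Context, svc *compute.Service, project string, enable bool) (*compute.Operation, error) {
	if enable {
		return svc.Projects.EnableXpnHost(project).Context(ctx).Do()
	}
	return svc.Projects.DisableXpnHost(project).Context(ctx).Do()
}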
-func (c *ProjectsEnableXpnResourceCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsenablexpnresourcerequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnResource") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.projects.enableXpnResource" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Enable XPN resource (a.k.a service project or service folder in the future) for a host project, so that subnetworks in the host project can be used by instances in the service project or folder.", - // "httpMethod": "POST", - // "id": "compute.projects.enableXpnResource", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/enableXpnResource", - // "request": { - // "$ref": "ProjectsEnableXpnResourceRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.projects.get": - -type ProjectsGetCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified Project resource. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/get -func (r *ProjectsService) Get(project string) *ProjectsGetCall { - c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.projects.get" call. -// Exactly one of *Project or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Project.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Project{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified Project resource.", - // "httpMethod": "GET", - // "id": "compute.projects.get", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}", - // "response": { - // "$ref": "Project" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.projects.getXpnHost": - -type ProjectsGetXpnHostCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetXpnHost: Get the XPN host project that this project links to. May -// be empty if no link exists. -func (r *ProjectsService) GetXpnHost(project string) *ProjectsGetXpnHostCall { - c := &ProjectsGetXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsGetXpnHostCall) Fields(s ...googleapi.Field) *ProjectsGetXpnHostCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsGetXpnHostCall) IfNoneMatch(entityTag string) *ProjectsGetXpnHostCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsGetXpnHostCall) Context(ctx context.Context) *ProjectsGetXpnHostCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ProjectsGetXpnHostCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnHost") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.projects.getXpnHost" call. -// Exactly one of *Project or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Project.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Project{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Get the XPN host project that this project links to. May be empty if no link exists.", - // "httpMethod": "GET", - // "id": "compute.projects.getXpnHost", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/getXpnHost", - // "response": { - // "$ref": "Project" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.projects.getXpnResources": - -type ProjectsGetXpnResourcesCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetXpnResources: Get XPN resources associated with this host project. 
-func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourcesCall { - c := &ProjectsGetXpnResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": -func (c *ProjectsGetXpnResourcesCall) Filter(filter string) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": -func (c *ProjectsGetXpnResourcesCall) MaxResults(maxResults int64) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "order_by": -func (c *ProjectsGetXpnResourcesCall) OrderBy(orderBy string) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("order_by", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": -func (c *ProjectsGetXpnResourcesCall) PageToken(pageToken string) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsGetXpnResourcesCall) Fields(s ...googleapi.Field) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsGetXpnResourcesCall) IfNoneMatch(entityTag string) *ProjectsGetXpnResourcesCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsGetXpnResourcesCall) Context(ctx context.Context) *ProjectsGetXpnResourcesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsGetXpnResourcesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnResources") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.projects.getXpnResources" call. -// Exactly one of *ProjectsGetXpnResources or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ProjectsGetXpnResources.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*ProjectsGetXpnResources, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ProjectsGetXpnResources{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Get XPN resources associated with this host project.", - // "httpMethod": "GET", - // "id": "compute.projects.getXpnResources", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "order_by": { - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/getXpnResources", - // "response": { - // "$ref": "ProjectsGetXpnResources" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsGetXpnResourcesCall) Pages(ctx context.Context, f func(*ProjectsGetXpnResources) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.projects.listXpnHosts": - -type ProjectsListXpnHostsCall struct { - s *Service - project string - projectslistxpnhostsrequest *ProjectsListXpnHostsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// ListXpnHosts: List all XPN host projects visible to the user in an -// organization. 
-func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsrequest *ProjectsListXpnHostsRequest) *ProjectsListXpnHostsCall { - c := &ProjectsListXpnHostsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.projectslistxpnhostsrequest = projectslistxpnhostsrequest - return c -} - -// Filter sets the optional parameter "filter": -func (c *ProjectsListXpnHostsCall) Filter(filter string) *ProjectsListXpnHostsCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": -func (c *ProjectsListXpnHostsCall) MaxResults(maxResults int64) *ProjectsListXpnHostsCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "order_by": -func (c *ProjectsListXpnHostsCall) OrderBy(orderBy string) *ProjectsListXpnHostsCall { - c.urlParams_.Set("order_by", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": -func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnHostsCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsListXpnHostsCall) Fields(s ...googleapi.Field) *ProjectsListXpnHostsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsListXpnHostsCall) Context(ctx context.Context) *ProjectsListXpnHostsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsListXpnHostsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectslistxpnhostsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/listXpnHosts") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.projects.listXpnHosts" call. -// Exactly one of *XpnHostList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *XpnHostList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &XpnHostList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "List all XPN host projects visible to the user in an organization.", - // "httpMethod": "POST", - // "id": "compute.projects.listXpnHosts", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "order_by": { - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/listXpnHosts", - // "request": { - // "$ref": "ProjectsListXpnHostsRequest" - // }, - // "response": { - // "$ref": "XpnHostList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsListXpnHostsCall) Pages(ctx context.Context, f func(*XpnHostList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.projects.moveDisk": - -type ProjectsMoveDiskCall struct { - s *Service - project string - diskmoverequest *DiskMoveRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// MoveDisk: Moves a persistent disk from one zone to another. -func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequest) *ProjectsMoveDiskCall { - c := &ProjectsMoveDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.diskmoverequest = diskmoverequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *ProjectsMoveDiskCall) Context(ctx context.Context) *ProjectsMoveDiskCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsMoveDiskCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.diskmoverequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveDisk") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.projects.moveDisk" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Moves a persistent disk from one zone to another.", - // "httpMethod": "POST", - // "id": "compute.projects.moveDisk", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/moveDisk", - // "request": { - // "$ref": "DiskMoveRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.projects.moveInstance": - -type ProjectsMoveInstanceCall struct { - s *Service - project string - instancemoverequest *InstanceMoveRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// MoveInstance: Moves an instance and its attached persistent disks -// from one zone to another. 
-func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall { - c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.instancemoverequest = instancemoverequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsMoveInstanceCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveInstance") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.projects.moveInstance" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Moves an instance and its attached persistent disks from one zone to another.", - // "httpMethod": "POST", - // "id": "compute.projects.moveInstance", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/moveInstance", - // "request": { - // "$ref": "InstanceMoveRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.projects.setCommonInstanceMetadata": - -type ProjectsSetCommonInstanceMetadataCall struct { - s *Service - project string - metadata *Metadata - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetCommonInstanceMetadata: Sets metadata common to all instances -// within the specified project using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setCommonInstanceMetadata -func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall { - c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.metadata = metadata - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setCommonInstanceMetadata") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.projects.setCommonInstanceMetadata" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets metadata common to all instances within the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.projects.setCommonInstanceMetadata", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/setCommonInstanceMetadata", - // "request": { - // "$ref": "Metadata" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.projects.setUsageExportBucket": - -type ProjectsSetUsageExportBucketCall struct { - s *Service - project string - usageexportlocation *UsageExportLocation - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetUsageExportBucket: Enables the usage export feature and sets the -// usage export bucket where reports are stored. If you provide an empty -// request body using this method, the usage export feature will be -// disabled. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setUsageExportBucket -func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall { - c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.usageexportlocation = usageexportlocation - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsSetUsageExportBucketCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setUsageExportBucket") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.projects.setUsageExportBucket" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. 
If you provide an empty request body using this method, the usage export feature will be disabled.", - // "httpMethod": "POST", - // "id": "compute.projects.setUsageExportBucket", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/setUsageExportBucket", - // "request": { - // "$ref": "UsageExportLocation" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "compute.regionAutoscalers.delete": - -type RegionAutoscalersDeleteCall struct { - s *Service - project string - region string - autoscaler string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified autoscaler. -func (r *RegionAutoscalersService) Delete(project string, region string, autoscaler string) *RegionAutoscalersDeleteCall { - c := &RegionAutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.autoscaler = autoscaler - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionAutoscalersDeleteCall) Fields(s ...googleapi.Field) *RegionAutoscalersDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionAutoscalersDeleteCall) Context(ctx context.Context) *RegionAutoscalersDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionAutoscalersDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "autoscaler": c.autoscaler, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionAutoscalers.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified autoscaler.", - // "httpMethod": "DELETE", - // "id": "compute.regionAutoscalers.delete", - // "parameterOrder": [ - // "project", - // "region", - // "autoscaler" - // ], - // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionAutoscalers.get": - -type RegionAutoscalersGetCall struct { - s *Service - project string - region string - autoscaler string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified autoscaler. -func (r *RegionAutoscalersService) Get(project string, region string, autoscaler string) *RegionAutoscalersGetCall { - c := &RegionAutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.autoscaler = autoscaler - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionAutoscalersGetCall) Fields(s ...googleapi.Field) *RegionAutoscalersGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *RegionAutoscalersGetCall) IfNoneMatch(entityTag string) *RegionAutoscalersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionAutoscalersGetCall) Context(ctx context.Context) *RegionAutoscalersGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionAutoscalersGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "autoscaler": c.autoscaler, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionAutoscalers.get" call. -// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Autoscaler.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Autoscaler{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified autoscaler.", - // "httpMethod": "GET", - // "id": "compute.regionAutoscalers.get", - // "parameterOrder": [ - // "project", - // "region", - // "autoscaler" - // ], - // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", - // "response": { - // "$ref": "Autoscaler" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionAutoscalers.insert": - -type RegionAutoscalersInsertCall struct { - s *Service - project string - region string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates an autoscaler in the specified project using the data -// included in the request. -func (r *RegionAutoscalersService) Insert(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersInsertCall { - c := &RegionAutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.autoscaler = autoscaler - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutoscalersInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionAutoscalersInsertCall) Context(ctx context.Context) *RegionAutoscalersInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RegionAutoscalersInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionAutoscalers.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates an autoscaler in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.regionAutoscalers.insert", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/autoscalers", - // "request": { - // "$ref": "Autoscaler" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionAutoscalers.list": - -type RegionAutoscalersListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of autoscalers contained within the specified 
-// region. -func (r *RegionAutoscalersService) List(project string, region string) *RegionAutoscalersListCall { - c := &RegionAutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscalersListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscalersListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionAutoscalersListCall) Fields(s ...googleapi.Field) *RegionAutoscalersListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionAutoscalersListCall) IfNoneMatch(entityTag string) *RegionAutoscalersListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionAutoscalersListCall) Context(ctx context.Context) *RegionAutoscalersListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionAutoscalersListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionAutoscalers.list" call. -// Exactly one of *RegionAutoscalerList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RegionAutoscalerList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAutoscalerList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &RegionAutoscalerList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of autoscalers contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.regionAutoscalers.list", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/autoscalers", - // "response": { - // "$ref": "RegionAutoscalerList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionAutoscalersListCall) Pages(ctx context.Context, f func(*RegionAutoscalerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionAutoscalers.patch": - -type RegionAutoscalersPatchCall struct { - s *Service - project string - region string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates an autoscaler in the specified project using the data -// included in the request. This method supports patch semantics. -func (r *RegionAutoscalersService) Patch(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersPatchCall { - c := &RegionAutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to patch. -func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutoscalersPatchCall { - c.urlParams_.Set("autoscaler", autoscaler) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionAutoscalersPatchCall) Fields(s ...googleapi.Field) *RegionAutoscalersPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionAutoscalersPatchCall) Context(ctx context.Context) *RegionAutoscalersPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RegionAutoscalersPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionAutoscalers.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "compute.regionAutoscalers.patch", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to patch.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/autoscalers", - // "request": { - // "$ref": "Autoscaler" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionAutoscalers.testIamPermissions": - -type RegionAutoscalersTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RegionAutoscalersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionAutoscalersTestIamPermissionsCall { - c := &RegionAutoscalersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionAutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionAutoscalersTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionAutoscalersTestIamPermissionsCall) Context(ctx context.Context) *RegionAutoscalersTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RegionAutoscalersTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionAutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionAutoscalers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionAutoscalers.testIamPermissions", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // 
"scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionAutoscalers.update": - -type RegionAutoscalersUpdateCall struct { - s *Service - project string - region string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates an autoscaler in the specified project using the data -// included in the request. -func (r *RegionAutoscalersService) Update(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersUpdateCall { - c := &RegionAutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to update. -func (c *RegionAutoscalersUpdateCall) Autoscaler(autoscaler string) *RegionAutoscalersUpdateCall { - c.urlParams_.Set("autoscaler", autoscaler) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutoscalersUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionAutoscalersUpdateCall) Context(ctx context.Context) *RegionAutoscalersUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionAutoscalersUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionAutoscalers.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an autoscaler in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.regionAutoscalers.update", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to update.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/autoscalers", - // "request": { - // "$ref": "Autoscaler" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionBackendServices.delete": - -type RegionBackendServicesDeleteCall struct { - s *Service - project string - region string - backendService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified regional BackendService resource. -func (r *RegionBackendServicesService) Delete(project string, region string, backendService string) *RegionBackendServicesDeleteCall { - c := &RegionBackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.backendService = backendService - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionBackendServicesDeleteCall) Fields(s ...googleapi.Field) *RegionBackendServicesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionBackendServicesDeleteCall) Context(ctx context.Context) *RegionBackendServicesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RegionBackendServicesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionBackendServices.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified regional BackendService resource.", - // "httpMethod": "DELETE", - // "id": "compute.regionBackendServices.delete", - // "parameterOrder": [ - // "project", - // "region", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionBackendServices.get": - -type RegionBackendServicesGetCall struct { - s *Service - project string - region string - backendService string - urlParams_ gensupport.URLParams - 
ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified regional BackendService resource. -func (r *RegionBackendServicesService) Get(project string, region string, backendService string) *RegionBackendServicesGetCall { - c := &RegionBackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.backendService = backendService - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionBackendServicesGetCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionBackendServicesGetCall) IfNoneMatch(entityTag string) *RegionBackendServicesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionBackendServicesGetCall) Context(ctx context.Context) *RegionBackendServicesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionBackendServicesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionBackendServices.get" call. -// Exactly one of *BackendService or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *BackendService.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BackendService{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified regional BackendService resource.", - // "httpMethod": "GET", - // "id": "compute.regionBackendServices.get", - // "parameterOrder": [ - // "project", - // "region", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}", - // "response": { - // "$ref": "BackendService" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionBackendServices.getHealth": - -type RegionBackendServicesGetHealthCall struct { - s *Service - project string - region string - backendService string - resourcegroupreference *ResourceGroupReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// GetHealth: Gets the most recent health check results for this -// regional BackendService. -func (r *RegionBackendServicesService) GetHealth(project string, region string, backendService string, resourcegroupreference *ResourceGroupReference) *RegionBackendServicesGetHealthCall { - c := &RegionBackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.backendService = backendService - c.resourcegroupreference = resourcegroupreference - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionBackendServicesGetHealthCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetHealthCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionBackendServicesGetHealthCall) Context(ctx context.Context) *RegionBackendServicesGetHealthCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RegionBackendServicesGetHealthCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}/getHealth") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionBackendServices.getHealth" call. -// Exactly one of *BackendServiceGroupHealth or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *BackendServiceGroupHealth.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BackendServiceGroupHealth{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the most recent health check results for this regional BackendService.", - // "httpMethod": "POST", - // "id": "compute.regionBackendServices.getHealth", - // "parameterOrder": [ - // "project", - // "region", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the queried instance belongs.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}/getHealth", - // "request": { - // "$ref": "ResourceGroupReference" - // }, - // "response": { - // "$ref": "BackendServiceGroupHealth" - // }, - // 
"scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionBackendServices.insert": - -type RegionBackendServicesInsertCall struct { - s *Service - project string - region string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a regional BackendService resource in the specified -// project using the data included in the request. There are several -// restrictions and guidelines to keep in mind when creating a regional -// backend service. Read Restrictions and Guidelines for more -// information. -func (r *RegionBackendServicesService) Insert(project string, region string, backendservice *BackendService) *RegionBackendServicesInsertCall { - c := &RegionBackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.backendservice = backendservice - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionBackendServicesInsertCall) Fields(s ...googleapi.Field) *RegionBackendServicesInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionBackendServicesInsertCall) Context(ctx context.Context) *RegionBackendServicesInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionBackendServicesInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionBackendServices.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Restrictions and Guidelines for more information.", - // "httpMethod": "POST", - // "id": "compute.regionBackendServices.insert", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/backendServices", - // "request": { - // "$ref": "BackendService" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionBackendServices.list": - -type RegionBackendServicesListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of regional BackendService resources -// available to the specified project in the given region. -func (r *RegionBackendServicesService) List(project string, region string) *RegionBackendServicesListCall { - c := &RegionBackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. 
For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServicesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionBackendServicesListCall) MaxResults(maxResults int64) *RegionBackendServicesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionBackendServicesListCall) OrderBy(orderBy string) *RegionBackendServicesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBackendServicesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionBackendServicesListCall) Fields(s ...googleapi.Field) *RegionBackendServicesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionBackendServicesListCall) IfNoneMatch(entityTag string) *RegionBackendServicesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *RegionBackendServicesListCall) Context(ctx context.Context) *RegionBackendServicesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionBackendServicesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionBackendServices.list" call. -// Exactly one of *BackendServiceList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BackendServiceList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BackendServiceList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", - // "httpMethod": "GET", - // "id": "compute.regionBackendServices.list", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/backendServices", - // "response": { - // "$ref": "BackendServiceList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *RegionBackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionBackendServices.patch": - -type RegionBackendServicesPatchCall struct { - s *Service - project string - region string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates the specified regional BackendService resource with -// the data included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. This method -// supports patch semantics. -func (r *RegionBackendServicesService) Patch(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesPatchCall { - c := &RegionBackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.backendService = backendService - c.backendservice = backendservice - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionBackendServicesPatchCall) Fields(s ...googleapi.Field) *RegionBackendServicesPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionBackendServicesPatchCall) Context(ctx context.Context) *RegionBackendServicesPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionBackendServicesPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionBackendServices.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "compute.regionBackendServices.patch", - // "parameterOrder": [ - // "project", - // "region", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}", - // "request": { - // "$ref": "BackendService" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionBackendServices.testIamPermissions": - -type RegionBackendServicesTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RegionBackendServicesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionBackendServicesTestIamPermissionsCall { - c := &RegionBackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *RegionBackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionBackendServicesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionBackendServicesTestIamPermissionsCall) Context(ctx context.Context) *RegionBackendServicesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionBackendServicesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionBackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionBackendServices.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionBackendServices.testIamPermissions", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/backendServices/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionBackendServices.update": - -type RegionBackendServicesUpdateCall struct { - s *Service - project string - region string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates the specified regional BackendService resource with -// the data included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. -func (r *RegionBackendServicesService) Update(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesUpdateCall { - c := &RegionBackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.backendService = backendService - c.backendservice = backendservice - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionBackendServicesUpdateCall) Fields(s ...googleapi.Field) *RegionBackendServicesUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *RegionBackendServicesUpdateCall) Context(ctx context.Context) *RegionBackendServicesUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionBackendServicesUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionBackendServices.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. 
Read Restrictions and Guidelines for more information.", - // "httpMethod": "PUT", - // "id": "compute.regionBackendServices.update", - // "parameterOrder": [ - // "project", - // "region", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}", - // "request": { - // "$ref": "BackendService" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionCommitments.aggregatedList": - -type RegionCommitmentsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of commitments. -func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitmentsAggregatedListCall { - c := &RegionCommitmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. 
-func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommitmentsAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionCommitmentsAggregatedListCall) MaxResults(maxResults int64) *RegionCommitmentsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionCommitmentsAggregatedListCall) OrderBy(orderBy string) *RegionCommitmentsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *RegionCommitmentsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionCommitmentsAggregatedListCall) Fields(s ...googleapi.Field) *RegionCommitmentsAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionCommitmentsAggregatedListCall) IfNoneMatch(entityTag string) *RegionCommitmentsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionCommitmentsAggregatedListCall) Context(ctx context.Context) *RegionCommitmentsAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RegionCommitmentsAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/commitments") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionCommitments.aggregatedList" call. -// Exactly one of *CommitmentAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *CommitmentAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*CommitmentAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CommitmentAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an aggregated list of commitments.", - // "httpMethod": "GET", - // "id": "compute.regionCommitments.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/commitments", - // "response": { - // "$ref": "CommitmentAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionCommitmentsAggregatedListCall) Pages(ctx context.Context, f func(*CommitmentAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionCommitments.get": - -type RegionCommitmentsGetCall struct { - s *Service - project string - region string - commitment string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified commitment resource. Get a list of -// available commitments by making a list() request. -func (r *RegionCommitmentsService) Get(project string, region string, commitment string) *RegionCommitmentsGetCall { - c := &RegionCommitmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.commitment = commitment - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionCommitmentsGetCall) Fields(s ...googleapi.Field) *RegionCommitmentsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionCommitmentsGetCall) IfNoneMatch(entityTag string) *RegionCommitmentsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionCommitmentsGetCall) Context(ctx context.Context) *RegionCommitmentsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionCommitmentsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments/{commitment}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "commitment": c.commitment, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionCommitments.get" call. -// Exactly one of *Commitment or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Commitment.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Commitment{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified commitment resource. 
Get a list of available commitments by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.regionCommitments.get", - // "parameterOrder": [ - // "project", - // "region", - // "commitment" - // ], - // "parameters": { - // "commitment": { - // "description": "Name of the commitment to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/commitments/{commitment}", - // "response": { - // "$ref": "Commitment" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionCommitments.insert": - -type RegionCommitmentsInsertCall struct { - s *Service - project string - region string - commitment *Commitment - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates an commitment in the specified project using the data -// included in the request. -func (r *RegionCommitmentsService) Insert(project string, region string, commitment *Commitment) *RegionCommitmentsInsertCall { - c := &RegionCommitmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.commitment = commitment - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommitmentsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionCommitmentsInsertCall) Context(ctx context.Context) *RegionCommitmentsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionCommitmentsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitment) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionCommitments.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates an commitment in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.regionCommitments.insert", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/commitments", - // "request": { - // "$ref": "Commitment" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionCommitments.list": - -type RegionCommitmentsListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of commitments contained within the specified -// region. -func (r *RegionCommitmentsService) List(project string, region string) *RegionCommitmentsListCall { - c := &RegionCommitmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). 
The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RegionCommitmentsListCall) Filter(filter string) *RegionCommitmentsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionCommitmentsListCall) MaxResults(maxResults int64) *RegionCommitmentsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionCommitmentsListCall) OrderBy(orderBy string) *RegionCommitmentsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionCommitmentsListCall) PageToken(pageToken string) *RegionCommitmentsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionCommitmentsListCall) Fields(s ...googleapi.Field) *RegionCommitmentsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *RegionCommitmentsListCall) IfNoneMatch(entityTag string) *RegionCommitmentsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionCommitmentsListCall) Context(ctx context.Context) *RegionCommitmentsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionCommitmentsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionCommitments.list" call. -// Exactly one of *CommitmentList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *CommitmentList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*CommitmentList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CommitmentList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of commitments contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.regionCommitments.list", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/commitments", - // "response": { - // "$ref": "CommitmentList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *RegionCommitmentsListCall) Pages(ctx context.Context, f func(*CommitmentList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroupManagers.abandonInstances": - -type RegionInstanceGroupManagersAbandonInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// AbandonInstances: Schedules a group action to remove the specified -// instances from the managed instance group. Abandoning an instance -// does not delete the instance, but it does remove the instance from -// any target pools that are applied by the managed instance group. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you abandon. This operation is marked as -// DONE when the action is scheduled even if the instances have not yet -// been removed from the group. You must separately verify the status of -// the abandoning action with the listmanagedinstances method. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest) *RegionInstanceGroupManagersAbandonInstancesCall { - c := &RegionInstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersabandoninstancesrequest = regioninstancegroupmanagersabandoninstancesrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersAbandonInstancesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersAbandonInstancesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersabandoninstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroupManagers.abandonInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nYou can specify a maximum of 1000 instances with this method per request.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.abandonInstances", - // "parameterOrder": [ - // "project", - // "region", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", - // "request": { - // "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionInstanceGroupManagers.delete": - -type RegionInstanceGroupManagersDeleteCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified managed instance group and all of the -// instances in that group. -func (r *RegionInstanceGroupManagersService) Delete(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersDeleteCall { - c := &RegionInstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupManagersDeleteCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionInstanceGroupManagersDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroupManagers.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified managed instance group and all of the instances in that group.", - // "httpMethod": "DELETE", - // "id": "compute.regionInstanceGroupManagers.delete", - // "parameterOrder": [ - // "project", - // "region", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group to delete.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionInstanceGroupManagers.deleteInstances": - -type RegionInstanceGroupManagersDeleteInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// DeleteInstances: Schedules a group action to delete the specified -// instances in the managed instance group. The instances are also -// removed from any target pools of which they were a member. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you delete. 
This operation is marked as DONE -// when the action is scheduled even if the instances are still being -// deleted. You must separately verify the status of the deleting action -// with the listmanagedinstances method. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest) *RegionInstanceGroupManagersDeleteInstancesCall { - c := &RegionInstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersdeleteinstancesrequest = regioninstancegroupmanagersdeleteinstancesrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteInstancesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteInstancesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersdeleteinstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroupManagers.deleteInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nYou can specify a maximum of 1000 instances with this method per request.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.deleteInstances", - // "parameterOrder": [ - // "project", - // "region", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", - // "request": { - // "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionInstanceGroupManagers.get": - -type RegionInstanceGroupManagersGetCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns all of the details about the specified managed instance -// group. -func (r *RegionInstanceGroupManagersService) Get(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersGetCall { - c := &RegionInstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
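A minimal usage sketch of the Get call and the Fields partial-response mechanism described just above, assuming the package being removed here is imported as compute and the service value exposes svc.RegionInstanceGroupManagers as in the upstream generated client; the Name/TargetSize fields of InstanceGroupManager and all identifiers are assumptions/placeholders, not part of this patch.

// Sketch only: fetch one regional managed instance group and ask the server
// for a partial response containing just the fields we need.
// Assumed imports: "context" and the generated compute package.
func getGroupSize(ctx context.Context, svc *compute.Service) (int64, error) {
	igm, err := svc.RegionInstanceGroupManagers.
		Get("my-project", "us-central1", "my-mig"). // placeholder project/region/group
		Fields("name", "targetSize").               // partial response; field names assumed from the Compute API
		Context(ctx).
		Do()
	if err != nil {
		return 0, err
	}
	return igm.TargetSize, nil // TargetSize assumed from the upstream InstanceGroupManager schema
}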
-func (c *RegionInstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupManagersGetCall) Context(ctx context.Context) *RegionInstanceGroupManagersGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroupManagers.get" call. -// Exactly one of *InstanceGroupManager or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupManager.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceGroupManager{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns all of the details about the specified managed instance group.", - // "httpMethod": "GET", - // "id": "compute.regionInstanceGroupManagers.get", - // "parameterOrder": [ - // "project", - // "region", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group to return.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - // "response": { - // "$ref": "InstanceGroupManager" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionInstanceGroupManagers.insert": - -type RegionInstanceGroupManagersInsertCall struct { - s *Service - project string - region string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a managed instance group using the information that -// you specify in the request. After the group is created, it schedules -// an action to create instances in the group using the specified -// instance template. This operation is marked as DONE when the group is -// created even if the instances in the group have not yet been created. -// You must separately verify the status of the individual instances -// with the listmanagedinstances method. -// -// A regional managed instance group can contain up to 2000 instances. -func (r *RegionInstanceGroupManagersService) Insert(project string, region string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersInsertCall { - c := &RegionInstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instancegroupmanager = instancegroupmanager - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. 
Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupManagersInsertCall) Context(ctx context.Context) *RegionInstanceGroupManagersInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroupManagers.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. 
You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.insert", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroupManagers", - // "request": { - // "$ref": "InstanceGroupManager" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionInstanceGroupManagers.list": - -type RegionInstanceGroupManagersListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of managed instance groups that are -// contained within the specified region. -func (r *RegionInstanceGroupManagersService) List(project string, region string) *RegionInstanceGroupManagersListCall { - c := &RegionInstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionInstanceGroupManagersListCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionInstanceGroupManagersListCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupManagersListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupManagersListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupManagersListCall) Context(ctx context.Context) *RegionInstanceGroupManagersListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionInstanceGroupManagersListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroupManagers.list" call. -// Exactly one of *RegionInstanceGroupManagerList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *RegionInstanceGroupManagerList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagerList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &RegionInstanceGroupManagerList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of managed instance groups that are contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.regionInstanceGroupManagers.list", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroupManagers", - // "response": { - // "$ref": "RegionInstanceGroupManagerList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupManagersListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroupManagers.listManagedInstances": - -type RegionInstanceGroupManagersListManagedInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// ListManagedInstances: Lists the instances in the managed instance -// group and instances that are scheduled to be created. The list -// includes any current actions that the group has scheduled for its -// instances. 
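The filter grammar and paging behavior documented above are easier to see in use. A hedged sketch, with the same assumptions about the compute import and svc value; the Items field of RegionInstanceGroupManagerList is assumed from the upstream schema and all identifiers are placeholders.

// Sketch only: list managed instance groups in one region whose name is not
// "example-instance", newest first, walking every page via the Pages helper.
// Assumed imports: "context", "fmt", and the generated compute package.
func listGroups(ctx context.Context, svc *compute.Service) error {
	call := svc.RegionInstanceGroupManagers.
		List("my-project", "us-central1").  // placeholder project/region
		Filter("name ne example-instance"). // filter expression form described above
		OrderBy("creationTimestamp desc").  // newest result first
		MaxResults(100)
	return call.Pages(ctx, func(page *compute.RegionInstanceGroupManagerList) error {
		for _, igm := range page.Items { // Items assumed from the upstream list schema
			fmt.Println(igm.Name)
		}
		return nil // a non-nil error here would stop the iteration
	})
}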
-func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListManagedInstancesCall { - c := &RegionInstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - return c -} - -// Filter sets the optional parameter "filter": -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Filter(filter string) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": -func (c *RegionInstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "order_by": -func (c *RegionInstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("order_by", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": -func (c *RegionInstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersListManagedInstancesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroupManagers.listManagedInstances" call. -// Exactly one of *RegionInstanceGroupManagersListInstancesResponse or -// error will be non-nil. Any non-2xx status code is an error. 
Response -// headers are in either -// *RegionInstanceGroupManagersListInstancesResponse.ServerResponse.Heade -// r or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstancesResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &RegionInstanceGroupManagersListInstancesResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.listManagedInstances", - // "parameterOrder": [ - // "project", - // "region", - // "instanceGroupManager" - // ], - // "parameters": { - // "filter": { - // "location": "query", - // "type": "string" - // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "order_by": { - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", - // "response": { - // "$ref": "RegionInstanceGroupManagersListInstancesResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
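listManagedInstances is paged the same way; the sketch below walks the response and prints the action currently scheduled for each instance. The ManagedInstances field and its Instance/CurrentAction members are assumed from the upstream schema, since only the response type name appears in this diff; identifiers are placeholders.

// Sketch only: report the current action for every instance in a regional
// managed instance group (e.g. CREATING, DELETING, RECREATING, NONE).
// Assumed imports: "context", "fmt", and the generated compute package.
func reportActions(ctx context.Context, svc *compute.Service) error {
	call := svc.RegionInstanceGroupManagers.
		ListManagedInstances("my-project", "us-central1", "my-mig") // placeholder identifiers
	return call.Pages(ctx, func(page *compute.RegionInstanceGroupManagersListInstancesResponse) error {
		for _, mi := range page.ManagedInstances { // field name assumed from the upstream schema
			fmt.Printf("%s: %s\n", mi.Instance, mi.CurrentAction)
		}
		return nil
	})
}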
-func (c *RegionInstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstancesResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroupManagers.patch": - -type RegionInstanceGroupManagersPatchCall struct { - s *Service - project string - region string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is patched even if the instances in the group are still in the -// process of being patched. You must separately verify the status of -// the individual instances with the listmanagedinstances method. This -// method supports patch semantics. -func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { - c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupManagersPatchCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroupManagers.patch" call. 
-// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "compute.regionInstanceGroupManagers.patch", - // "parameterOrder": [ - // "project", - // "region", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - // "request": { - // "$ref": "InstanceGroupManager" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionInstanceGroupManagers.recreateInstances": - -type RegionInstanceGroupManagersRecreateInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// RecreateInstances: Schedules a group action to recreate the specified -// instances in the managed instance group. The instances are deleted -// and recreated using the current instance template for the managed -// instance group. This operation is marked as DONE when the action is -// scheduled even if the instances have not yet been recreated. 
You must -// separately verify the status of the recreating action with the -// listmanagedinstances method. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { - c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersrecreaterequest = regioninstancegroupmanagersrecreaterequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersRecreateInstancesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersRecreateInstancesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersrecreaterequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroupManagers.recreateInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nYou can specify a maximum of 1000 instances with this method per request.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.recreateInstances", - // "parameterOrder": [ - // "project", - // "region", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", - // "request": { - // "$ref": "RegionInstanceGroupManagersRecreateRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionInstanceGroupManagers.resize": - -type RegionInstanceGroupManagersResizeCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Resize: Changes the intended size for the managed instance group. If -// you increase the size, the group schedules actions to create new -// instances using the current instance template. If you decrease the -// size, the group schedules delete actions on one or more instances. -// The resize operation is marked DONE when the resize actions are -// scheduled even if the group has not yet added or deleted any -// instances. You must separately verify the status of the creating or -// deleting actions with the listmanagedinstances method. 
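As the comment above stresses, the Operation returned by resize only means the action was scheduled; verifying the created or deleted instances is a separate step (listmanagedinstances). A hedged sketch with placeholder identifiers and the usual assumptions about the compute import and svc value:

// Sketch only: grow a regional managed instance group to five instances.
// Assumed imports: "context", "fmt", and the generated compute package.
func resizeGroup(ctx context.Context, svc *compute.Service) error {
	op, err := svc.RegionInstanceGroupManagers.
		Resize("my-project", "us-central1", "my-mig", 5). // placeholder identifiers; size is an int64
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("resize scheduled; operation:", op.Name) // Operation.Name assumed from the upstream schema
	return nil
}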
-func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { - c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - c.urlParams_.Set("size", fmt.Sprint(size)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupManagersResizeCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroupManagers.resize" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Changes the intended size for the managed instance group. If you increase the size, the group schedules actions to create new instances using the current instance template. 
If you decrease the size, the group schedules delete actions on one or more instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.resize", - // "parameterOrder": [ - // "project", - // "region", - // "instanceGroupManager", - // "size" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "size": { - // "description": "Number of instances that should exist in this instance group manager.", - // "format": "int32", - // "location": "query", - // "minimum": "0", - // "required": true, - // "type": "integer" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionInstanceGroupManagers.setAutoHealingPolicies": - -type RegionInstanceGroupManagersSetAutoHealingPoliciesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetAutoHealingPolicies: Modifies the autohealing policy for the -// instances in this managed instance group. -func (r *RegionInstanceGroupManagersService) SetAutoHealingPolicies(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { - c := &RegionInstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerssetautohealingrequest = regioninstancegroupmanagerssetautohealingrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
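The Context behavior described above (any pending HTTP request is aborted when the provided context is canceled) applies to every call in this file; the sketch below shows it on setAutoHealingPolicies. The request body is left empty because only the request type name appears in this diff; populate it per the RegionInstanceGroupManagersSetAutoHealingRequest schema. Identifiers are placeholders.

// Sketch only: apply an (empty, placeholder) autohealing request under a
// 30-second deadline; the underlying HTTP request is aborted if it expires.
// Assumed imports: "context", "time", and the generated compute package.
func setAutoHealing(svc *compute.Service) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	req := &compute.RegionInstanceGroupManagersSetAutoHealingRequest{} // fields per the API schema
	_, err := svc.RegionInstanceGroupManagers.
		SetAutoHealingPolicies("my-project", "us-central1", "my-mig", req).
		Context(ctx).
		Do()
	return err
}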
-func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssetautohealingrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroupManagers.setAutoHealingPolicies" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Modifies the autohealing policy for the instances in this managed instance group.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.setAutoHealingPolicies", - // "parameterOrder": [ - // "project", - // "region", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", - // "request": { - // "$ref": "RegionInstanceGroupManagersSetAutoHealingRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionInstanceGroupManagers.setInstanceTemplate": - -type RegionInstanceGroupManagersSetInstanceTemplateCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetInstanceTemplate: Sets the instance template to use when creating -// new instances or recreating instances in this group. Existing -// instances are not affected. -func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { - c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerssettemplaterequest = regioninstancegroupmanagerssettemplaterequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetInstanceTemplateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetInstanceTemplateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettemplaterequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroupManagers.setInstanceTemplate" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. 
Existing instances are not affected.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", - // "parameterOrder": [ - // "project", - // "region", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", - // "request": { - // "$ref": "RegionInstanceGroupManagersSetTemplateRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionInstanceGroupManagers.setTargetPools": - -type RegionInstanceGroupManagersSetTargetPoolsCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetTargetPools: Modifies the target pools to which all new instances -// in this group are assigned. Existing instances in the group are not -// affected. -func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { - c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerssettargetpoolsrequest = regioninstancegroupmanagerssettargetpoolsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetTargetPoolsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetTargetPoolsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettargetpoolsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroupManagers.setTargetPools" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Modifies the target pools to which all new instances in this group are assigned. 
Existing instances in the group are not affected.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.setTargetPools", - // "parameterOrder": [ - // "project", - // "region", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", - // "request": { - // "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionInstanceGroupManagers.testIamPermissions": - -type RegionInstanceGroupManagersTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RegionInstanceGroupManagersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupManagersTestIamPermissionsCall { - c := &RegionInstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupManagersTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroupManagers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.testIamPermissions", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // 
"response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionInstanceGroupManagers.update": - -type RegionInstanceGroupManagersUpdateCall struct { - s *Service - project string - region string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is updated even if the instances in the group have not yet been -// updated. You must separately verify the status of the individual -// instances with the listmanagedinstances method. -func (r *RegionInstanceGroupManagersService) Update(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersUpdateCall { - c := &RegionInstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupManagersUpdateCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionInstanceGroupManagersUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroupManagers.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listmanagedinstances method.", - // "httpMethod": "PUT", - // "id": "compute.regionInstanceGroupManagers.update", - // "parameterOrder": [ - // "project", - // "region", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - // "request": { - // "$ref": "InstanceGroupManager" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionInstanceGroups.get": - -type RegionInstanceGroupsGetCall struct { - s *Service - project string - region string - instanceGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified instance group resource. -func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { - c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroup = instanceGroup - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupsGetCall) Context(ctx context.Context) *RegionInstanceGroupsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionInstanceGroupsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroups.get" call. -// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *InstanceGroup.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceGroup{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified instance group resource.", - // "httpMethod": "GET", - // "id": "compute.regionInstanceGroups.get", - // "parameterOrder": [ - // "project", - // "region", - // "instanceGroup" - // ], - // "parameters": { - // "instanceGroup": { - // "description": "Name of the instance group resource to return.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}", - // "response": { - // "$ref": "InstanceGroup" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionInstanceGroups.list": - -type RegionInstanceGroupsListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of instance group resources contained within -// the specified region. -func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { - c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. 
Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInstanceGroupsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGroupsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstanceGroupsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupsListCall) Context(ctx context.Context) *RegionInstanceGroupsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RegionInstanceGroupsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroups.list" call. -// Exactly one of *RegionInstanceGroupList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RegionInstanceGroupList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &RegionInstanceGroupList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of instance group resources contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.regionInstanceGroups.list", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroups", - // "response": { - // "$ref": "RegionInstanceGroupList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroups.listInstances": - -type RegionInstanceGroupsListInstancesCall struct { - s *Service - project string - region string - instanceGroup string - regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// ListInstances: Lists the instances in the specified instance group -// and displays information about the named ports. Depending on the -// specified options, this method can list all instances or only the -// instances that are running. -func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { - c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroup = instanceGroup - c.regioninstancegroupslistinstancesrequest = regioninstancegroupslistinstancesrequest - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
-// (Default: 500) -func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroups.listInstances" call. -// Exactly one of *RegionInstanceGroupsListInstances or error will be -// non-nil. Any non-2xx status code is an error. 
Response headers are in -// either *RegionInstanceGroupsListInstances.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &RegionInstanceGroupsListInstances{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.listInstances", - // "parameterOrder": [ - // "project", - // "region", - // "instanceGroup" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "instanceGroup": { - // "description": "Name of the regional instance group for which we want to list the instances.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. 
Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", - // "request": { - // "$ref": "RegionInstanceGroupsListInstancesRequest" - // }, - // "response": { - // "$ref": "RegionInstanceGroupsListInstances" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroups.setNamedPorts": - -type RegionInstanceGroupsSetNamedPortsCall struct { - s *Service - project string - region string - instanceGroup string - regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetNamedPorts: Sets the named ports for the specified regional -// instance group. -func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { - c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroup = instanceGroup - c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the named ports for the specified regional instance group.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.setNamedPorts", - // "parameterOrder": [ - // "project", - // "region", - // "instanceGroup" - // ], - // "parameters": { - // "instanceGroup": { - // "description": "The name of the regional instance group where the named ports are updated.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", - // "request": { - // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionInstanceGroups.testIamPermissions": - -type RegionInstanceGroupsTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RegionInstanceGroupsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupsTestIamPermissionsCall { - c := &RegionInstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *RegionInstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionInstanceGroupsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroups.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.testIamPermissions", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionOperations.delete": - -type RegionOperationsDeleteCall struct { - s *Service - project string - region string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified region-specific Operations resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/delete -func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall { - c := &RegionOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.operation = operation - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionOperationsDeleteCall) Fields(s ...googleapi.Field) *RegionOperationsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionOperationsDeleteCall) Context(ctx context.Context) *RegionOperationsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RegionOperationsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "operation": c.operation, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionOperations.delete" call. -func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Deletes the specified region-specific Operations resource.", - // "httpMethod": "DELETE", - // "id": "compute.regionOperations.delete", - // "parameterOrder": [ - // "project", - // "region", - // "operation" - // ], - // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/operations/{operation}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionOperations.get": - -type RegionOperationsGetCall struct { - s *Service - project string - region string - operation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Retrieves the specified region-specific Operations resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/get -func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall { - c := &RegionOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.operation = operation - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionOperationsGetCall) Fields(s ...googleapi.Field) *RegionOperationsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionOperationsGetCall) IfNoneMatch(entityTag string) *RegionOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionOperationsGetCall) Context(ctx context.Context) *RegionOperationsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionOperationsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "operation": c.operation, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionOperations.get" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the specified region-specific Operations resource.", - // "httpMethod": "GET", - // "id": "compute.regionOperations.get", - // "parameterOrder": [ - // "project", - // "region", - // "operation" - // ], - // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/operations/{operation}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionOperations.list": - -type RegionOperationsListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of Operation resources contained within the -// specified region. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/list -func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall { - c := &RegionOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. 
For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperationsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionOperationsListCall) OrderBy(orderBy string) *RegionOperationsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperationsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperationsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionOperationsListCall) Context(ctx context.Context) *RegionOperationsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RegionOperationsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionOperations.list" call. -// Exactly one of *OperationList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *OperationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &OperationList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of Operation resources contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.regionOperations.list", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/operations", - // "response": { - // "$ref": "OperationList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regions.get": - -type RegionsGetCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified Region resource. Get a list of available -// regions by making a list() request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/get -func (r *RegionsService) Get(project string, region string) *RegionsGetCall { - c := &RegionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionsGetCall) Fields(s ...googleapi.Field) *RegionsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionsGetCall) IfNoneMatch(entityTag string) *RegionsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionsGetCall) Context(ctx context.Context) *RegionsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regions.get" call. -// Exactly one of *Region or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Region.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Region{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified Region resource. 
Get a list of available regions by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.regions.get", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}", - // "response": { - // "$ref": "Region" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regions.list": - -type RegionsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of region resources available to the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/list -func (r *RegionsService) List(project string) *RegionsListCall { - c := &RegionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RegionsListCall) Filter(filter string) *RegionsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
-// (Default: 500) -func (c *RegionsListCall) MaxResults(maxResults int64) *RegionsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionsListCall) OrderBy(orderBy string) *RegionsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionsListCall) Fields(s ...googleapi.Field) *RegionsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionsListCall) IfNoneMatch(entityTag string) *RegionsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionsListCall) Context(ctx context.Context) *RegionsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regions.list" call. -// Exactly one of *RegionList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *RegionList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &RegionList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of region resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.regions.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions", - // "response": { - // "$ref": "RegionList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionsListCall) Pages(ctx context.Context, f func(*RegionList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.routers.aggregatedList": - -type RoutersAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of routers. -func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCall { - c := &RoutersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RoutersAggregatedListCall) Filter(filter string) *RoutersAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RoutersAggregatedListCall) MaxResults(maxResults int64) *RoutersAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RoutersAggregatedListCall) OrderBy(orderBy string) *RoutersAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RoutersAggregatedListCall) PageToken(pageToken string) *RoutersAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RoutersAggregatedListCall) Fields(s ...googleapi.Field) *RoutersAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RoutersAggregatedListCall) IfNoneMatch(entityTag string) *RoutersAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RoutersAggregatedListCall) Context(ctx context.Context) *RoutersAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RoutersAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/routers") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routers.aggregatedList" call. -// Exactly one of *RouterAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RouterAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &RouterAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an aggregated list of routers.", - // "httpMethod": "GET", - // "id": "compute.routers.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/routers", - // "response": { - // "$ref": "RouterAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RoutersAggregatedListCall) Pages(ctx context.Context, f func(*RouterAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.routers.delete": - -type RoutersDeleteCall struct { - s *Service - project string - region string - router string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified Router resource. -func (r *RoutersService) Delete(project string, region string, router string) *RoutersDeleteCall { - c := &RoutersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.router = router - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *RoutersDeleteCall) Fields(s ...googleapi.Field) *RoutersDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RoutersDeleteCall) Context(ctx context.Context) *RoutersDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RoutersDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "router": c.router, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routers.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified Router resource.", - // "httpMethod": "DELETE", - // "id": "compute.routers.delete", - // "parameterOrder": [ - // "project", - // "region", - // "router" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/routers/{router}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.routers.get": - -type RoutersGetCall struct { - s *Service - project string - region string - router string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified Router resource. Get a list of available -// routers by making a list() request. -func (r *RoutersService) Get(project string, region string, router string) *RoutersGetCall { - c := &RoutersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.router = router - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RoutersGetCall) Fields(s ...googleapi.Field) *RoutersGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RoutersGetCall) IfNoneMatch(entityTag string) *RoutersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RoutersGetCall) Context(ctx context.Context) *RoutersGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
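As a hedged aside on the IfNoneMatch option documented above (the generated comment's "In-None-Match" appears to be a typo for the If-None-Match header it actually sets): a caller can pair it with googleapi.IsNotModified to skip re-processing unchanged resources. The sketch below is illustrative only, not part of this patch; it assumes the upstream google.golang.org/api/compute/v1 and google.golang.org/api/googleapi packages, and the project, region, router name, and ETag source are placeholders.

package routerdemo

import (
	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// routerIfChanged re-fetches a Router only if it changed since the given ETag
// (the ETag would come from a previous response's headers). Project, region,
// and router names are placeholders.
func routerIfChanged(svc *compute.Service, etag string) (*compute.Router, error) {
	r, err := svc.Routers.Get("my-project", "us-central1", "my-router").
		IfNoneMatch(etag).
		Do()
	if googleapi.IsNotModified(err) {
		// A 304 surfaces as a *googleapi.Error; treat it as "no change".
		return nil, nil
	}
	return r, err
}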
-func (c *RoutersGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "router": c.router, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routers.get" call. -// Exactly one of *Router or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Router.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Router{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified Router resource. 
Get a list of available routers by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.routers.get", - // "parameterOrder": [ - // "project", - // "region", - // "router" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/routers/{router}", - // "response": { - // "$ref": "Router" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.routers.getRouterStatus": - -type RoutersGetRouterStatusCall struct { - s *Service - project string - region string - router string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetRouterStatus: Retrieves runtime information of the specified -// router. -func (r *RoutersService) GetRouterStatus(project string, region string, router string) *RoutersGetRouterStatusCall { - c := &RoutersGetRouterStatusCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.router = router - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RoutersGetRouterStatusCall) Fields(s ...googleapi.Field) *RoutersGetRouterStatusCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RoutersGetRouterStatusCall) IfNoneMatch(entityTag string) *RoutersGetRouterStatusCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RoutersGetRouterStatusCall) Context(ctx context.Context) *RoutersGetRouterStatusCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RoutersGetRouterStatusCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/getRouterStatus") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "router": c.router, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routers.getRouterStatus" call. -// Exactly one of *RouterStatusResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RouterStatusResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterStatusResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &RouterStatusResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves runtime information of the specified router.", - // "httpMethod": "GET", - // "id": "compute.routers.getRouterStatus", - // "parameterOrder": [ - // "project", - // "region", - // "router" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to query.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/routers/{router}/getRouterStatus", - // "response": { - // "$ref": "RouterStatusResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.routers.insert": - -type RoutersInsertCall struct { - s *Service - 
project string - region string - router *Router - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a Router resource in the specified project and region -// using the data included in the request. -func (r *RoutersService) Insert(project string, region string, router *Router) *RoutersInsertCall { - c := &RoutersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.router = router - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RoutersInsertCall) Fields(s ...googleapi.Field) *RoutersInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RoutersInsertCall) Context(ctx context.Context) *RoutersInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RoutersInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routers.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a Router resource in the specified project and region using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.routers.insert", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/routers", - // "request": { - // "$ref": "Router" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.routers.list": - -type RoutersListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of Router resources available to the specified -// project. -func (r *RoutersService) List(project string, region string) *RoutersListCall { - c := &RoutersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. 
For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RoutersListCall) Filter(filter string) *RoutersListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RoutersListCall) MaxResults(maxResults int64) *RoutersListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RoutersListCall) OrderBy(orderBy string) *RoutersListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RoutersListCall) PageToken(pageToken string) *RoutersListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RoutersListCall) Fields(s ...googleapi.Field) *RoutersListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RoutersListCall) IfNoneMatch(entityTag string) *RoutersListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RoutersListCall) Context(ctx context.Context) *RoutersListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RoutersListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routers.list" call. -// Exactly one of *RouterList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *RouterList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &RouterList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of Router resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.routers.list", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/routers", - // "response": { - // "$ref": "RouterList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RoutersListCall) Pages(ctx context.Context, f func(*RouterList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.routers.patch": - -type RoutersPatchCall struct { - s *Service - project string - region string - router string - router2 *Router - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Patches the specified Router resource with the data included -// in the request. This method supports patch semantics. 
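Before the patch and update methods that follow, a minimal usage sketch may help readers of this generated client: it pages through routers with the Pages helper and the eq/ne filter syntax documented above. It is illustrative only and not part of this patch; it assumes the upstream google.golang.org/api/compute/v1 and golang.org/x/oauth2/google packages, and the project ID, region, and filter value are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// Application Default Credentials; the read-only scope suffices for List.
	client, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}
	// Pages re-issues List with each NextPageToken and stops on an empty token,
	// as the generated Pages helper above does.
	err = svc.Routers.List("my-project", "us-central1").
		Filter("name ne example-router"). // eq/ne expression, per the Filter docs
		Pages(ctx, func(page *compute.RouterList) error {
			for _, r := range page.Items {
				fmt.Println(r.Name, r.Network)
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}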
-func (r *RoutersService) Patch(project string, region string, router string, router2 *Router) *RoutersPatchCall { - c := &RoutersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.router = router - c.router2 = router2 - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RoutersPatchCall) Fields(s ...googleapi.Field) *RoutersPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RoutersPatchCall) Context(ctx context.Context) *RoutersPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RoutersPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "router": c.router, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routers.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Patches the specified Router resource with the data included in the request. 
This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "compute.routers.patch", - // "parameterOrder": [ - // "project", - // "region", - // "router" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/routers/{router}", - // "request": { - // "$ref": "Router" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.routers.preview": - -type RoutersPreviewCall struct { - s *Service - project string - region string - router string - router2 *Router - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Preview: Preview fields auto-generated during router create and -// update operations. Calling this method does NOT create or update the -// router. -func (r *RoutersService) Preview(project string, region string, router string, router2 *Router) *RoutersPreviewCall { - c := &RoutersPreviewCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.router = router - c.router2 = router2 - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RoutersPreviewCall) Fields(s ...googleapi.Field) *RoutersPreviewCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RoutersPreviewCall) Context(ctx context.Context) *RoutersPreviewCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RoutersPreviewCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/preview") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "router": c.router, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routers.preview" call. -// Exactly one of *RoutersPreviewResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RoutersPreviewResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &RoutersPreviewResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router.", - // "httpMethod": "POST", - // "id": "compute.routers.preview", - // "parameterOrder": [ - // "project", - // "region", - // "router" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to query.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/routers/{router}/preview", - // "request": { - // "$ref": "Router" - // }, - // "response": { - // "$ref": "RoutersPreviewResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.routers.testIamPermissions": - -type RoutersTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-func (r *RoutersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RoutersTestIamPermissionsCall { - c := &RoutersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RoutersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutersTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RoutersTestIamPermissionsCall) Context(ctx context.Context) *RoutersTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RoutersTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RoutersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RoutersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.routers.testIamPermissions", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/routers/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.routers.update": - -type RoutersUpdateCall struct { - s *Service - project string - region string - router string - router2 *Router - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates the specified Router resource with the data included -// in the request. -func (r *RoutersService) Update(project string, region string, router string, router2 *Router) *RoutersUpdateCall { - c := &RoutersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.router = router - c.router2 = router2 - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RoutersUpdateCall) Fields(s ...googleapi.Field) *RoutersUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RoutersUpdateCall) Context(ctx context.Context) *RoutersUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RoutersUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "router": c.router, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routers.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates the specified Router resource with the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.routers.update", - // "parameterOrder": [ - // "project", - // "region", - // "router" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/routers/{router}", - // "request": { - // "$ref": "Router" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.routes.delete": - -type RoutesDeleteCall struct { - s *Service - project string - route string - urlParams_ 
gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified Route resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/delete -func (r *RoutesService) Delete(project string, route string) *RoutesDeleteCall { - c := &RoutesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.route = route - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RoutesDeleteCall) Fields(s ...googleapi.Field) *RoutesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RoutesDeleteCall) Context(ctx context.Context) *RoutesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RoutesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "route": c.route, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routes.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified Route resource.", - // "httpMethod": "DELETE", - // "id": "compute.routes.delete", - // "parameterOrder": [ - // "project", - // "route" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "route": { - // "description": "Name of the Route resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/routes/{route}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.routes.get": - -type RoutesGetCall struct { - s *Service - project string - route string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified Route resource. Get a list of available -// routes by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/get -func (r *RoutesService) Get(project string, route string) *RoutesGetCall { - c := &RoutesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.route = route - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RoutesGetCall) Fields(s ...googleapi.Field) *RoutesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RoutesGetCall) IfNoneMatch(entityTag string) *RoutesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RoutesGetCall) Context(ctx context.Context) *RoutesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
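The Do methods on these generated calls surface non-2xx responses as *googleapi.Error, so callers commonly inspect the status code to distinguish "route not found" from a real failure. A small sketch under that assumption follows; it is not part of this patch, and the project and route names passed in are placeholders.

package routedemo

import (
	"net/http"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getRoute returns (nil, nil) when the Route does not exist, by inspecting the
// *googleapi.Error returned from the generated Do method.
func getRoute(svc *compute.Service, project, name string) (*compute.Route, error) {
	r, err := svc.Routes.Get(project, name).Do()
	if apiErr, ok := err.(*googleapi.Error); ok && apiErr.Code == http.StatusNotFound {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return r, nil
}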
-func (c *RoutesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "route": c.route, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routes.get" call. -// Exactly one of *Route or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Route.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Route{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified Route resource. Get a list of available routes by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.routes.get", - // "parameterOrder": [ - // "project", - // "route" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "route": { - // "description": "Name of the Route resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/routes/{route}", - // "response": { - // "$ref": "Route" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.routes.insert": - -type RoutesInsertCall struct { - s *Service - project string - route *Route - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a Route resource in the specified project using the -// data included in the request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/insert -func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall { - c := &RoutesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.route = route - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RoutesInsertCall) Fields(s ...googleapi.Field) *RoutesInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RoutesInsertCall) Context(ctx context.Context) *RoutesInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RoutesInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.route) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routes.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a Route resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.routes.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/routes", - // "request": { - // "$ref": "Route" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.routes.list": - -type RoutesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of Route resources available to the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/list -func (r *RoutesService) List(project string) *RoutesListCall { - c := &RoutesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. 
-func (c *RoutesListCall) Filter(filter string) *RoutesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RoutesListCall) MaxResults(maxResults int64) *RoutesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RoutesListCall) OrderBy(orderBy string) *RoutesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RoutesListCall) Fields(s ...googleapi.Field) *RoutesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RoutesListCall) IfNoneMatch(entityTag string) *RoutesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RoutesListCall) Context(ctx context.Context) *RoutesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RoutesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routes.list" call. -// Exactly one of *RouteList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *RouteList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &RouteList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of Route resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.routes.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/routes", - // "response": { - // "$ref": "RouteList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RoutesListCall) Pages(ctx context.Context, f func(*RouteList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.routes.testIamPermissions": - -type RoutesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RoutesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *RoutesTestIamPermissionsCall { - c := &RoutesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RoutesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *RoutesTestIamPermissionsCall) Context(ctx context.Context) *RoutesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RoutesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RoutesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routes.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RoutesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.routes.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/routes/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.snapshots.delete": - -type SnapshotsDeleteCall struct { - s *Service - project string - snapshot string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified Snapshot resource. Keep in mind that -// deleting a single snapshot might not necessarily delete all the data -// on that snapshot. If any data on the snapshot that is marked for -// deletion is needed for subsequent snapshots, the data will be moved -// to the next corresponding snapshot. -// -// For more information, see Deleting snaphots. -// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/delete -func (r *SnapshotsService) Delete(project string, snapshot string) *SnapshotsDeleteCall { - c := &SnapshotsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.snapshot = snapshot - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SnapshotsDeleteCall) Fields(s ...googleapi.Field) *SnapshotsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SnapshotsDeleteCall) Context(ctx context.Context) *SnapshotsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *SnapshotsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "snapshot": c.snapshot, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.snapshots.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snaphots.", - // "httpMethod": "DELETE", - // "id": "compute.snapshots.delete", - // "parameterOrder": [ - // "project", - // "snapshot" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "snapshot": { - // "description": "Name of the Snapshot resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/snapshots/{snapshot}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.snapshots.get": - -type SnapshotsGetCall struct { - s *Service - project string - snapshot string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified Snapshot resource. 
Get a list of available -// snapshots by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/get -func (r *SnapshotsService) Get(project string, snapshot string) *SnapshotsGetCall { - c := &SnapshotsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.snapshot = snapshot - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SnapshotsGetCall) Fields(s ...googleapi.Field) *SnapshotsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *SnapshotsGetCall) IfNoneMatch(entityTag string) *SnapshotsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SnapshotsGetCall) Context(ctx context.Context) *SnapshotsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *SnapshotsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "snapshot": c.snapshot, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.snapshots.get" call. -// Exactly one of *Snapshot or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Snapshot.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Snapshot{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified Snapshot resource. Get a list of available snapshots by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.snapshots.get", - // "parameterOrder": [ - // "project", - // "snapshot" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "snapshot": { - // "description": "Name of the Snapshot resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/snapshots/{snapshot}", - // "response": { - // "$ref": "Snapshot" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.snapshots.list": - -type SnapshotsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of Snapshot resources contained within the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/list -func (r *SnapshotsService) List(project string) *SnapshotsListCall { - c := &SnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. 
-// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *SnapshotsListCall) MaxResults(maxResults int64) *SnapshotsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *SnapshotsListCall) OrderBy(orderBy string) *SnapshotsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *SnapshotsListCall) PageToken(pageToken string) *SnapshotsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SnapshotsListCall) Fields(s ...googleapi.Field) *SnapshotsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *SnapshotsListCall) IfNoneMatch(entityTag string) *SnapshotsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SnapshotsListCall) Context(ctx context.Context) *SnapshotsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *SnapshotsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.snapshots.list" call. -// Exactly one of *SnapshotList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *SnapshotList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &SnapshotList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of Snapshot resources contained within the specified project.", - // "httpMethod": "GET", - // "id": "compute.snapshots.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/snapshots", - // "response": { - // "$ref": "SnapshotList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *SnapshotsListCall) Pages(ctx context.Context, f func(*SnapshotList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.snapshots.setLabels": - -type SnapshotsSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetLabels: Sets the labels on a snapshot. To learn more about labels, -// read the Labeling or Tagging Resources documentation. -func (r *SnapshotsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *SnapshotsSetLabelsCall { - c := &SnapshotsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SnapshotsSetLabelsCall) Fields(s ...googleapi.Field) *SnapshotsSetLabelsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SnapshotsSetLabelsCall) Context(ctx context.Context) *SnapshotsSetLabelsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *SnapshotsSetLabelsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SnapshotsSetLabelsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/setLabels") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.snapshots.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the labels on a snapshot. 
To learn more about labels, read the Labeling or Tagging Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.snapshots.setLabels", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/snapshots/{resource}/setLabels", - // "request": { - // "$ref": "GlobalSetLabelsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.snapshots.testIamPermissions": - -type SnapshotsTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *SnapshotsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SnapshotsTestIamPermissionsCall { - c := &SnapshotsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SnapshotsTestIamPermissionsCall) Fields(s ...googleapi.Field) *SnapshotsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SnapshotsTestIamPermissionsCall) Context(ctx context.Context) *SnapshotsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *SnapshotsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/testIamPermissions") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.snapshots.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.snapshots.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/snapshots/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.sslCertificates.delete": - -type SslCertificatesDeleteCall struct { - s *Service - project string - sslCertificate string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified SslCertificate resource. -func (r *SslCertificatesService) Delete(project string, sslCertificate string) *SslCertificatesDeleteCall { - c := &SslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.sslCertificate = sslCertificate - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *SslCertificatesDeleteCall) Fields(s ...googleapi.Field) *SslCertificatesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SslCertificatesDeleteCall) Context(ctx context.Context) *SslCertificatesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *SslCertificatesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "sslCertificate": c.sslCertificate, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.sslCertificates.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified SslCertificate resource.", - // "httpMethod": "DELETE", - // "id": "compute.sslCertificates.delete", - // "parameterOrder": [ - // "project", - // "sslCertificate" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "sslCertificate": { - // "description": "Name of the SslCertificate resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/sslCertificates/{sslCertificate}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.sslCertificates.get": - -type SslCertificatesGetCall struct { - s *Service - project string - sslCertificate string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified SslCertificate resource. Get a list of -// available SSL certificates by making a list() request. -func (r *SslCertificatesService) Get(project string, sslCertificate string) *SslCertificatesGetCall { - c := &SslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.sslCertificate = sslCertificate - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SslCertificatesGetCall) Fields(s ...googleapi.Field) *SslCertificatesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *SslCertificatesGetCall) IfNoneMatch(entityTag string) *SslCertificatesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SslCertificatesGetCall) Context(ctx context.Context) *SslCertificatesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *SslCertificatesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "sslCertificate": c.sslCertificate, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.sslCertificates.get" call. -// Exactly one of *SslCertificate or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *SslCertificate.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertificate, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &SslCertificate{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified SslCertificate resource. 
Get a list of available SSL certificates by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.sslCertificates.get", - // "parameterOrder": [ - // "project", - // "sslCertificate" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "sslCertificate": { - // "description": "Name of the SslCertificate resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/sslCertificates/{sslCertificate}", - // "response": { - // "$ref": "SslCertificate" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.sslCertificates.insert": - -type SslCertificatesInsertCall struct { - s *Service - project string - sslcertificate *SslCertificate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a SslCertificate resource in the specified project -// using the data included in the request. -func (r *SslCertificatesService) Insert(project string, sslcertificate *SslCertificate) *SslCertificatesInsertCall { - c := &SslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.sslcertificate = sslcertificate - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SslCertificatesInsertCall) Fields(s ...googleapi.Field) *SslCertificatesInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SslCertificatesInsertCall) Context(ctx context.Context) *SslCertificatesInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *SslCertificatesInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.sslCertificates.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.sslCertificates.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/sslCertificates", - // "request": { - // "$ref": "SslCertificate" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.sslCertificates.list": - -type SslCertificatesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of SslCertificate resources available to the -// specified project. -func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { - c := &SslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. 
Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *SslCertificatesListCall) Filter(filter string) *SslCertificatesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *SslCertificatesListCall) MaxResults(maxResults int64) *SslCertificatesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *SslCertificatesListCall) OrderBy(orderBy string) *SslCertificatesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *SslCertificatesListCall) PageToken(pageToken string) *SslCertificatesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SslCertificatesListCall) Fields(s ...googleapi.Field) *SslCertificatesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *SslCertificatesListCall) IfNoneMatch(entityTag string) *SslCertificatesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SslCertificatesListCall) Context(ctx context.Context) *SslCertificatesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *SslCertificatesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.sslCertificates.list" call. -// Exactly one of *SslCertificateList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *SslCertificateList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertificateList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &SslCertificateList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of SslCertificate resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.sslCertificates.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/sslCertificates", - // "response": { - // "$ref": "SslCertificateList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *SslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertificateList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.sslCertificates.testIamPermissions": - -type SslCertificatesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-func (r *SslCertificatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SslCertificatesTestIamPermissionsCall { - c := &SslCertificatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SslCertificatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *SslCertificatesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SslCertificatesTestIamPermissionsCall) Context(ctx context.Context) *SslCertificatesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *SslCertificatesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SslCertificatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.sslCertificates.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *SslCertificatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.sslCertificates.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/sslCertificates/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.subnetworks.aggregatedList": - -type SubnetworksAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of subnetworks. -func (r *SubnetworksService) AggregatedList(project string) *SubnetworksAggregatedListCall { - c := &SubnetworksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. 
-// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *SubnetworksAggregatedListCall) Filter(filter string) *SubnetworksAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *SubnetworksAggregatedListCall) MaxResults(maxResults int64) *SubnetworksAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *SubnetworksAggregatedListCall) OrderBy(orderBy string) *SubnetworksAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *SubnetworksAggregatedListCall) PageToken(pageToken string) *SubnetworksAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SubnetworksAggregatedListCall) Fields(s ...googleapi.Field) *SubnetworksAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *SubnetworksAggregatedListCall) IfNoneMatch(entityTag string) *SubnetworksAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SubnetworksAggregatedListCall) Context(ctx context.Context) *SubnetworksAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *SubnetworksAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SubnetworksAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/subnetworks") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.subnetworks.aggregatedList" call. -// Exactly one of *SubnetworkAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *SubnetworkAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*SubnetworkAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &SubnetworkAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an aggregated list of subnetworks.", - // "httpMethod": "GET", - // "id": "compute.subnetworks.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/subnetworks", - // "response": { - // "$ref": "SubnetworkAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *SubnetworksAggregatedListCall) Pages(ctx context.Context, f func(*SubnetworkAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.subnetworks.delete": - -type SubnetworksDeleteCall struct { - s *Service - project string - region string - subnetwork string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified subnetwork. -func (r *SubnetworksService) Delete(project string, region string, subnetwork string) *SubnetworksDeleteCall { - c := &SubnetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.subnetwork = subnetwork - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *SubnetworksDeleteCall) Fields(s ...googleapi.Field) *SubnetworksDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SubnetworksDeleteCall) Context(ctx context.Context) *SubnetworksDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *SubnetworksDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SubnetworksDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "subnetwork": c.subnetwork, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.subnetworks.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified subnetwork.", - // "httpMethod": "DELETE", - // "id": "compute.subnetworks.delete", - // "parameterOrder": [ - // "project", - // "region", - // "subnetwork" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "subnetwork": { - // "description": "Name of the Subnetwork resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/subnetworks/{subnetwork}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.subnetworks.expandIpCidrRange": - -type SubnetworksExpandIpCidrRangeCall struct { - s *Service - project string - region string - subnetwork string - subnetworksexpandipcidrrangerequest *SubnetworksExpandIpCidrRangeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// ExpandIpCidrRange: Expands the IP CIDR range of the subnetwork to a -// specified value. -func (r *SubnetworksService) ExpandIpCidrRange(project string, region string, subnetwork string, subnetworksexpandipcidrrangerequest *SubnetworksExpandIpCidrRangeRequest) *SubnetworksExpandIpCidrRangeCall { - c := &SubnetworksExpandIpCidrRangeCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.subnetwork = subnetwork - c.subnetworksexpandipcidrrangerequest = subnetworksexpandipcidrrangerequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SubnetworksExpandIpCidrRangeCall) Fields(s ...googleapi.Field) *SubnetworksExpandIpCidrRangeCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SubnetworksExpandIpCidrRangeCall) Context(ctx context.Context) *SubnetworksExpandIpCidrRangeCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *SubnetworksExpandIpCidrRangeCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SubnetworksExpandIpCidrRangeCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.subnetworksexpandipcidrrangerequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "subnetwork": c.subnetwork, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.subnetworks.expandIpCidrRange" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SubnetworksExpandIpCidrRangeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Expands the IP CIDR range of the subnetwork to a specified value.", - // "httpMethod": "POST", - // "id": "compute.subnetworks.expandIpCidrRange", - // "parameterOrder": [ - // "project", - // "region", - // "subnetwork" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "subnetwork": { - // "description": "Name of the Subnetwork resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", - // "request": { - // "$ref": "SubnetworksExpandIpCidrRangeRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - 
// "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.subnetworks.get": - -type SubnetworksGetCall struct { - s *Service - project string - region string - subnetwork string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified subnetwork. Get a list of available -// subnetworks list() request. -func (r *SubnetworksService) Get(project string, region string, subnetwork string) *SubnetworksGetCall { - c := &SubnetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.subnetwork = subnetwork - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SubnetworksGetCall) Fields(s ...googleapi.Field) *SubnetworksGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *SubnetworksGetCall) IfNoneMatch(entityTag string) *SubnetworksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SubnetworksGetCall) Context(ctx context.Context) *SubnetworksGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *SubnetworksGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SubnetworksGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "subnetwork": c.subnetwork, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.subnetworks.get" call. -// Exactly one of *Subnetwork or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Subnetwork.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Subnetwork{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified subnetwork. Get a list of available subnetworks list() request.", - // "httpMethod": "GET", - // "id": "compute.subnetworks.get", - // "parameterOrder": [ - // "project", - // "region", - // "subnetwork" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "subnetwork": { - // "description": "Name of the Subnetwork resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/subnetworks/{subnetwork}", - // "response": { - // "$ref": "Subnetwork" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.subnetworks.getIamPolicy": - -type SubnetworksGetIamPolicyCall struct { - s *Service - project string - region string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *SubnetworksService) GetIamPolicy(project string, region string, resource string) *SubnetworksGetIamPolicyCall { - c := &SubnetworksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SubnetworksGetIamPolicyCall) Fields(s ...googleapi.Field) *SubnetworksGetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *SubnetworksGetIamPolicyCall) IfNoneMatch(entityTag string) *SubnetworksGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. 
Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SubnetworksGetIamPolicyCall) Context(ctx context.Context) *SubnetworksGetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *SubnetworksGetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SubnetworksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{resource}/getIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.subnetworks.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *SubnetworksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.subnetworks.getIamPolicy", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/subnetworks/{resource}/getIamPolicy", - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.subnetworks.insert": - -type SubnetworksInsertCall struct { - s *Service - project string - region string - subnetwork *Subnetwork - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a subnetwork in the specified project using the data -// included in the request. -func (r *SubnetworksService) Insert(project string, region string, subnetwork *Subnetwork) *SubnetworksInsertCall { - c := &SubnetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.subnetwork = subnetwork - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SubnetworksInsertCall) Fields(s ...googleapi.Field) *SubnetworksInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SubnetworksInsertCall) Context(ctx context.Context) *SubnetworksInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *SubnetworksInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SubnetworksInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.subnetwork) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.subnetworks.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a subnetwork in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.subnetworks.insert", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/subnetworks", - // "request": { - // "$ref": "Subnetwork" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.subnetworks.list": - -type SubnetworksListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of subnetworks available to the specified -// project. -func (r *SubnetworksService) List(project string, region string) *SubnetworksListCall { - c := &SubnetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). 
The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *SubnetworksListCall) Filter(filter string) *SubnetworksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *SubnetworksListCall) MaxResults(maxResults int64) *SubnetworksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *SubnetworksListCall) OrderBy(orderBy string) *SubnetworksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *SubnetworksListCall) PageToken(pageToken string) *SubnetworksListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SubnetworksListCall) Fields(s ...googleapi.Field) *SubnetworksListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
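A minimal usage sketch of the If-None-Match/ETag flow these generated comments describe, assuming an already-authenticated *compute.Service, a cached ETag saved from an earlier response, and the v0.beta import path for this vendored copy:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed for this vendored copy
	"google.golang.org/api/googleapi"
)

// freshSubnetworks re-fetches the subnetwork list only if it has changed
// since the response that produced cachedETag. It reports changed=false when
// the server answers 304 Not Modified, in which case the caller keeps its
// cached copy.
func freshSubnetworks(ctx context.Context, svc *compute.Service, project, region, cachedETag string) (list *compute.SubnetworkList, changed bool, err error) {
	list, err = svc.Subnetworks.List(project, region).
		IfNoneMatch(cachedETag). // sets the If-None-Match request header
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, false, nil // unchanged since cachedETag
	}
	if err != nil {
		return nil, false, err
	}
	return list, true, nil
}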
-func (c *SubnetworksListCall) IfNoneMatch(entityTag string) *SubnetworksListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SubnetworksListCall) Context(ctx context.Context) *SubnetworksListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *SubnetworksListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SubnetworksListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.subnetworks.list" call. -// Exactly one of *SubnetworkList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *SubnetworkList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &SubnetworkList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of subnetworks available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.subnetworks.list", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/subnetworks", - // "response": { - // "$ref": "SubnetworkList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
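A minimal, self-contained sketch of how the Filter, MaxResults, and Pages helpers documented above are typically combined; the project and region names, the filter value, and the import path are assumptions, and Application Default Credentials are assumed to be configured:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // import path assumed for this vendored copy
)

func main() {
	ctx := context.Background()

	// Assumes Application Default Credentials are available in the environment.
	httpClient, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(httpClient)
	if err != nil {
		log.Fatal(err)
	}

	// Build a list call with a filter (syntax as documented above) and a page
	// size, then let Pages drive the pageToken/nextPageToken loop.
	call := svc.Subnetworks.List("my-project", "us-central1").
		Filter("name ne example-subnet").
		MaxResults(100)
	err = call.Pages(ctx, func(page *compute.SubnetworkList) error {
		for _, s := range page.Items {
			fmt.Println(s.Name, s.IpCidrRange)
		}
		return nil // a non-nil error here halts the iteration
	})
	if err != nil {
		log.Fatal(err)
	}
}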
-func (c *SubnetworksListCall) Pages(ctx context.Context, f func(*SubnetworkList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.subnetworks.setIamPolicy": - -type SubnetworksSetIamPolicyCall struct { - s *Service - project string - region string - resource string - policy *Policy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *SubnetworksService) SetIamPolicy(project string, region string, resource string, policy *Policy) *SubnetworksSetIamPolicyCall { - c := &SubnetworksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.policy = policy - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SubnetworksSetIamPolicyCall) Fields(s ...googleapi.Field) *SubnetworksSetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SubnetworksSetIamPolicyCall) Context(ctx context.Context) *SubnetworksSetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *SubnetworksSetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SubnetworksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{resource}/setIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.subnetworks.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *SubnetworksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "httpMethod": "POST", - // "id": "compute.subnetworks.setIamPolicy", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/subnetworks/{resource}/setIamPolicy", - // "request": { - // "$ref": "Policy" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.subnetworks.setPrivateIpGoogleAccess": - -type SubnetworksSetPrivateIpGoogleAccessCall struct { - s *Service - project string - region string - subnetwork string - subnetworkssetprivateipgoogleaccessrequest *SubnetworksSetPrivateIpGoogleAccessRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetPrivateIpGoogleAccess: Set whether VMs in this subnet can access -// Google services without assigning external IP addresses through -// Cloudpath. -func (r *SubnetworksService) SetPrivateIpGoogleAccess(project string, region string, subnetwork string, subnetworkssetprivateipgoogleaccessrequest *SubnetworksSetPrivateIpGoogleAccessRequest) *SubnetworksSetPrivateIpGoogleAccessCall { - c := &SubnetworksSetPrivateIpGoogleAccessCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.subnetwork = subnetwork - c.subnetworkssetprivateipgoogleaccessrequest = subnetworkssetprivateipgoogleaccessrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SubnetworksSetPrivateIpGoogleAccessCall) Fields(s ...googleapi.Field) *SubnetworksSetPrivateIpGoogleAccessCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *SubnetworksSetPrivateIpGoogleAccessCall) Context(ctx context.Context) *SubnetworksSetPrivateIpGoogleAccessCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *SubnetworksSetPrivateIpGoogleAccessCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SubnetworksSetPrivateIpGoogleAccessCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.subnetworkssetprivateipgoogleaccessrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "subnetwork": c.subnetwork, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.subnetworks.setPrivateIpGoogleAccess" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Cloudpath.", - // "httpMethod": "POST", - // "id": "compute.subnetworks.setPrivateIpGoogleAccess", - // "parameterOrder": [ - // "project", - // "region", - // "subnetwork" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "subnetwork": { - // "description": "Name of the Subnetwork resource.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess", - // "request": { - // "$ref": "SubnetworksSetPrivateIpGoogleAccessRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.subnetworks.testIamPermissions": - -type SubnetworksTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *SubnetworksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *SubnetworksTestIamPermissionsCall { - c := &SubnetworksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SubnetworksTestIamPermissionsCall) Fields(s ...googleapi.Field) *SubnetworksTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *SubnetworksTestIamPermissionsCall) Context(ctx context.Context) *SubnetworksTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *SubnetworksTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SubnetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.subnetworks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.subnetworks.testIamPermissions", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/subnetworks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.targetHttpProxies.delete": - -type TargetHttpProxiesDeleteCall struct { - s *Service - project string - targetHttpProxy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified TargetHttpProxy resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/delete -func (r *TargetHttpProxiesService) Delete(project string, targetHttpProxy string) *TargetHttpProxiesDeleteCall { - c := &TargetHttpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetHttpProxy = targetHttpProxy - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetHttpProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetHttpProxiesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetHttpProxiesDeleteCall) Context(ctx context.Context) *TargetHttpProxiesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TargetHttpProxiesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpProxy": c.targetHttpProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetHttpProxies.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified TargetHttpProxy resource.", - // "httpMethod": "DELETE", - // "id": "compute.targetHttpProxies.delete", - // "parameterOrder": [ - // "project", - // "targetHttpProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "targetHttpProxy": { - // "description": "Name of the TargetHttpProxy resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetHttpProxies.get": - -type TargetHttpProxiesGetCall struct { - s *Service - project string - targetHttpProxy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified TargetHttpProxy resource. Get a list of -// available target HTTP proxies by making a list() request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/get -func (r *TargetHttpProxiesService) Get(project string, targetHttpProxy string) *TargetHttpProxiesGetCall { - c := &TargetHttpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetHttpProxy = targetHttpProxy - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetHttpProxiesGetCall) Fields(s ...googleapi.Field) *TargetHttpProxiesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetHttpProxiesGetCall) IfNoneMatch(entityTag string) *TargetHttpProxiesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetHttpProxiesGetCall) Context(ctx context.Context) *TargetHttpProxiesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetHttpProxiesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpProxy": c.targetHttpProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetHttpProxies.get" call. -// Exactly one of *TargetHttpProxy or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *TargetHttpProxy.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxy, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetHttpProxy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified TargetHttpProxy resource. Get a list of available target HTTP proxies by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.targetHttpProxies.get", - // "parameterOrder": [ - // "project", - // "targetHttpProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "targetHttpProxy": { - // "description": "Name of the TargetHttpProxy resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", - // "response": { - // "$ref": "TargetHttpProxy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.targetHttpProxies.insert": - -type TargetHttpProxiesInsertCall struct { - s *Service - project string - targethttpproxy *TargetHttpProxy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a TargetHttpProxy resource in the specified project -// using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/insert -func (r *TargetHttpProxiesService) Insert(project string, targethttpproxy *TargetHttpProxy) *TargetHttpProxiesInsertCall { - c := &TargetHttpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targethttpproxy = targethttpproxy - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetHttpProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttpProxiesInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetHttpProxiesInsertCall) Context(ctx context.Context) *TargetHttpProxiesInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TargetHttpProxiesInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpproxy) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetHttpProxies.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.targetHttpProxies.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetHttpProxies", - // "request": { - // "$ref": "TargetHttpProxy" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetHttpProxies.list": - -type TargetHttpProxiesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of TargetHttpProxy resources available to -// the specified project. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/list -func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCall { - c := &TargetHttpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *TargetHttpProxiesListCall) Filter(filter string) *TargetHttpProxiesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *TargetHttpProxiesListCall) MaxResults(maxResults int64) *TargetHttpProxiesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *TargetHttpProxiesListCall) OrderBy(orderBy string) *TargetHttpProxiesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *TargetHttpProxiesListCall) PageToken(pageToken string) *TargetHttpProxiesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetHttpProxiesListCall) Fields(s ...googleapi.Field) *TargetHttpProxiesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetHttpProxiesListCall) IfNoneMatch(entityTag string) *TargetHttpProxiesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetHttpProxiesListCall) Context(ctx context.Context) *TargetHttpProxiesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetHttpProxiesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetHttpProxies.list" call. -// Exactly one of *TargetHttpProxyList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetHttpProxyList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxyList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetHttpProxyList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.targetHttpProxies.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetHttpProxies", - // "response": { - // "$ref": "TargetHttpProxyList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *TargetHttpProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpProxyList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.targetHttpProxies.setUrlMap": - -type TargetHttpProxiesSetUrlMapCall struct { - s *Service - project string - targetHttpProxy string - urlmapreference *UrlMapReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetUrlMap: Changes the URL map for TargetHttpProxy. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/setUrlMap -func (r *TargetHttpProxiesService) SetUrlMap(project string, targetHttpProxy string, urlmapreference *UrlMapReference) *TargetHttpProxiesSetUrlMapCall { - c := &TargetHttpProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetHttpProxy = targetHttpProxy - c.urlmapreference = urlmapreference - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetHttpProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHttpProxiesSetUrlMapCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetHttpProxiesSetUrlMapCall) Context(ctx context.Context) *TargetHttpProxiesSetUrlMapCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TargetHttpProxiesSetUrlMapCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpProxy": c.targetHttpProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetHttpProxies.setUrlMap" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Changes the URL map for TargetHttpProxy.", - // "httpMethod": "POST", - // "id": "compute.targetHttpProxies.setUrlMap", - // "parameterOrder": [ - // "project", - // "targetHttpProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "targetHttpProxy": { - // "description": "Name of the TargetHttpProxy to set a URL map for.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap", - // "request": { - // "$ref": "UrlMapReference" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetHttpProxies.testIamPermissions": - -type TargetHttpProxiesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ 
context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *TargetHttpProxiesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *TargetHttpProxiesTestIamPermissionsCall { - c := &TargetHttpProxiesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetHttpProxiesTestIamPermissionsCall) Fields(s ...googleapi.Field) *TargetHttpProxiesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetHttpProxiesTestIamPermissionsCall) Context(ctx context.Context) *TargetHttpProxiesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetHttpProxiesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetHttpProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetHttpProxies.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetHttpProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.targetHttpProxies.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetHttpProxies/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.targetHttpsProxies.delete": - -type TargetHttpsProxiesDeleteCall struct { - s *Service - project string - targetHttpsProxy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified TargetHttpsProxy resource. -func (r *TargetHttpsProxiesService) Delete(project string, targetHttpsProxy string) *TargetHttpsProxiesDeleteCall { - c := &TargetHttpsProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetHttpsProxy = targetHttpsProxy - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetHttpsProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetHttpsProxiesDeleteCall) Context(ctx context.Context) *TargetHttpsProxiesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TargetHttpsProxiesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpsProxy": c.targetHttpsProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetHttpsProxies.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified TargetHttpsProxy resource.", - // "httpMethod": "DELETE", - // "id": "compute.targetHttpsProxies.delete", - // "parameterOrder": [ - // "project", - // "targetHttpsProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetHttpsProxies.get": - -type TargetHttpsProxiesGetCall struct { - s *Service - project string - targetHttpsProxy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified TargetHttpsProxy resource. Get a list of -// available target HTTPS proxies by making a list() request. 
-func (r *TargetHttpsProxiesService) Get(project string, targetHttpsProxy string) *TargetHttpsProxiesGetCall { - c := &TargetHttpsProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetHttpsProxy = targetHttpsProxy - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetHttpsProxiesGetCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetHttpsProxiesGetCall) IfNoneMatch(entityTag string) *TargetHttpsProxiesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetHttpsProxiesGetCall) Context(ctx context.Context) *TargetHttpsProxiesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetHttpsProxiesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpsProxy": c.targetHttpsProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetHttpsProxies.get" call. -// Exactly one of *TargetHttpsProxy or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetHttpsProxy.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxy, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetHttpsProxy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified TargetHttpsProxy resource. Get a list of available target HTTPS proxies by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.targetHttpsProxies.get", - // "parameterOrder": [ - // "project", - // "targetHttpsProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", - // "response": { - // "$ref": "TargetHttpsProxy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.targetHttpsProxies.insert": - -type TargetHttpsProxiesInsertCall struct { - s *Service - project string - targethttpsproxy *TargetHttpsProxy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a TargetHttpsProxy resource in the specified project -// using the data included in the request. -func (r *TargetHttpsProxiesService) Insert(project string, targethttpsproxy *TargetHttpsProxy) *TargetHttpsProxiesInsertCall { - c := &TargetHttpsProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targethttpsproxy = targethttpsproxy - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetHttpsProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetHttpsProxiesInsertCall) Context(ctx context.Context) *TargetHttpsProxiesInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TargetHttpsProxiesInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxy) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetHttpsProxies.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.targetHttpsProxies.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetHttpsProxies", - // "request": { - // "$ref": "TargetHttpsProxy" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetHttpsProxies.list": - -type TargetHttpsProxiesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of TargetHttpsProxy resources available to -// the specified project. 
-func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesListCall { - c := &TargetHttpsProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *TargetHttpsProxiesListCall) Filter(filter string) *TargetHttpsProxiesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *TargetHttpsProxiesListCall) MaxResults(maxResults int64) *TargetHttpsProxiesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *TargetHttpsProxiesListCall) OrderBy(orderBy string) *TargetHttpsProxiesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *TargetHttpsProxiesListCall) PageToken(pageToken string) *TargetHttpsProxiesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetHttpsProxiesListCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetHttpsProxiesListCall) IfNoneMatch(entityTag string) *TargetHttpsProxiesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetHttpsProxiesListCall) Context(ctx context.Context) *TargetHttpsProxiesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetHttpsProxiesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetHttpsProxies.list" call. -// Exactly one of *TargetHttpsProxyList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetHttpsProxyList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxyList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetHttpsProxyList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.targetHttpsProxies.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetHttpsProxies", - // "response": { - // "$ref": "TargetHttpsProxyList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *TargetHttpsProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpsProxyList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.targetHttpsProxies.setSslCertificates": - -type TargetHttpsProxiesSetSslCertificatesCall struct { - s *Service - project string - targetHttpsProxy string - targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. -func (r *TargetHttpsProxiesService) SetSslCertificates(project string, targetHttpsProxy string, targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest) *TargetHttpsProxiesSetSslCertificatesCall { - c := &TargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetHttpsProxy = targetHttpsProxy - c.targethttpsproxiessetsslcertificatesrequest = targethttpsproxiessetsslcertificatesrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetHttpsProxiesSetSslCertificatesCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetSslCertificatesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetHttpsProxiesSetSslCertificatesCall) Context(ctx context.Context) *TargetHttpsProxiesSetSslCertificatesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxiessetsslcertificatesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpsProxy": c.targetHttpsProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetHttpsProxies.setSslCertificates" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Replaces SslCertificates for TargetHttpsProxy.", - // "httpMethod": "POST", - // "id": "compute.targetHttpsProxies.setSslCertificates", - // "parameterOrder": [ - // "project", - // "targetHttpsProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", - // "request": { - // "$ref": "TargetHttpsProxiesSetSslCertificatesRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetHttpsProxies.setUrlMap": - -type TargetHttpsProxiesSetUrlMapCall 
struct { - s *Service - project string - targetHttpsProxy string - urlmapreference *UrlMapReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetUrlMap: Changes the URL map for TargetHttpsProxy. -func (r *TargetHttpsProxiesService) SetUrlMap(project string, targetHttpsProxy string, urlmapreference *UrlMapReference) *TargetHttpsProxiesSetUrlMapCall { - c := &TargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetHttpsProxy = targetHttpsProxy - c.urlmapreference = urlmapreference - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetUrlMapCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetHttpsProxiesSetUrlMapCall) Context(ctx context.Context) *TargetHttpsProxiesSetUrlMapCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetHttpsProxiesSetUrlMapCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpsProxy": c.targetHttpsProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetHttpsProxies.setUrlMap" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Changes the URL map for TargetHttpsProxy.", - // "httpMethod": "POST", - // "id": "compute.targetHttpsProxies.setUrlMap", - // "parameterOrder": [ - // "project", - // "targetHttpsProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", - // "request": { - // "$ref": "UrlMapReference" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetHttpsProxies.testIamPermissions": - -type TargetHttpsProxiesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *TargetHttpsProxiesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *TargetHttpsProxiesTestIamPermissionsCall { - c := &TargetHttpsProxiesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetHttpsProxiesTestIamPermissionsCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetHttpsProxiesTestIamPermissionsCall) Context(ctx context.Context) *TargetHttpsProxiesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TargetHttpsProxiesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetHttpsProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetHttpsProxies.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetHttpsProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.targetHttpsProxies.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetHttpsProxies/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.targetInstances.aggregatedList": - 
-type TargetInstancesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of target instances. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/aggregatedList -func (r *TargetInstancesService) AggregatedList(project string) *TargetInstancesAggregatedListCall { - c := &TargetInstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *TargetInstancesAggregatedListCall) Filter(filter string) *TargetInstancesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *TargetInstancesAggregatedListCall) MaxResults(maxResults int64) *TargetInstancesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. 
-func (c *TargetInstancesAggregatedListCall) OrderBy(orderBy string) *TargetInstancesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *TargetInstancesAggregatedListCall) PageToken(pageToken string) *TargetInstancesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetInstancesAggregatedListCall) Fields(s ...googleapi.Field) *TargetInstancesAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetInstancesAggregatedListCall) IfNoneMatch(entityTag string) *TargetInstancesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetInstancesAggregatedListCall) Context(ctx context.Context) *TargetInstancesAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetInstancesAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetInstances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetInstances.aggregatedList" call. -// Exactly one of *TargetInstanceAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *TargetInstanceAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetInstanceAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetInstanceAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an aggregated list of target instances.", - // "httpMethod": "GET", - // "id": "compute.targetInstances.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/targetInstances", - // "response": { - // "$ref": "TargetInstanceAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *TargetInstancesAggregatedListCall) Pages(ctx context.Context, f func(*TargetInstanceAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.targetInstances.delete": - -type TargetInstancesDeleteCall struct { - s *Service - project string - zone string - targetInstance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified TargetInstance resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/delete -func (r *TargetInstancesService) Delete(project string, zone string, targetInstance string) *TargetInstancesDeleteCall { - c := &TargetInstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.targetInstance = targetInstance - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetInstancesDeleteCall) Fields(s ...googleapi.Field) *TargetInstancesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetInstancesDeleteCall) Context(ctx context.Context) *TargetInstancesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetInstancesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "targetInstance": c.targetInstance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetInstances.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified TargetInstance resource.", - // "httpMethod": "DELETE", - // "id": "compute.targetInstances.delete", - // "parameterOrder": [ - // "project", - // "zone", - // "targetInstance" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "targetInstance": { - // "description": "Name of the TargetInstance resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetInstances.get": - -type TargetInstancesGetCall struct { - s *Service - project string - zone string - targetInstance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified TargetInstance resource. Get a list of -// available target instances by making a list() request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/get -func (r *TargetInstancesService) Get(project string, zone string, targetInstance string) *TargetInstancesGetCall { - c := &TargetInstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.targetInstance = targetInstance - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetInstancesGetCall) Fields(s ...googleapi.Field) *TargetInstancesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetInstancesGetCall) IfNoneMatch(entityTag string) *TargetInstancesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetInstancesGetCall) Context(ctx context.Context) *TargetInstancesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetInstancesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "targetInstance": c.targetInstance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetInstances.get" call. -// Exactly one of *TargetInstance or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *TargetInstance.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstance, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetInstance{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified TargetInstance resource. Get a list of available target instances by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.targetInstances.get", - // "parameterOrder": [ - // "project", - // "zone", - // "targetInstance" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "targetInstance": { - // "description": "Name of the TargetInstance resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", - // "response": { - // "$ref": "TargetInstance" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.targetInstances.insert": - -type TargetInstancesInsertCall struct { - s *Service - project string - zone string - targetinstance *TargetInstance - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a TargetInstance resource in the specified project -// and zone using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/insert -func (r *TargetInstancesService) Insert(project string, zone string, targetinstance *TargetInstance) *TargetInstancesInsertCall { - c := &TargetInstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.targetinstance = targetinstance - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetInstancesInsertCall) Fields(s ...googleapi.Field) *TargetInstancesInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetInstancesInsertCall) Context(ctx context.Context) *TargetInstancesInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TargetInstancesInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetInstancesInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetinstance) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetInstances.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.targetInstances.insert", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/targetInstances", - // "request": { - // "$ref": "TargetInstance" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetInstances.list": - -type TargetInstancesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of TargetInstance resources available to the 
-// specified project and zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/list -func (r *TargetInstancesService) List(project string, zone string) *TargetInstancesListCall { - c := &TargetInstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *TargetInstancesListCall) Filter(filter string) *TargetInstancesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *TargetInstancesListCall) MaxResults(maxResults int64) *TargetInstancesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *TargetInstancesListCall) OrderBy(orderBy string) *TargetInstancesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *TargetInstancesListCall) PageToken(pageToken string) *TargetInstancesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetInstancesListCall) Fields(s ...googleapi.Field) *TargetInstancesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetInstancesListCall) IfNoneMatch(entityTag string) *TargetInstancesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetInstancesListCall) Context(ctx context.Context) *TargetInstancesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetInstancesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetInstances.list" call. -// Exactly one of *TargetInstanceList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetInstanceList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInstanceList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetInstanceList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of TargetInstance resources available to the specified project and zone.", - // "httpMethod": "GET", - // "id": "compute.targetInstances.list", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/targetInstances", - // "response": { - // "$ref": "TargetInstanceList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *TargetInstancesListCall) Pages(ctx context.Context, f func(*TargetInstanceList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.targetInstances.testIamPermissions": - -type TargetInstancesTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *TargetInstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *TargetInstancesTestIamPermissionsCall { - c := &TargetInstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetInstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *TargetInstancesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetInstancesTestIamPermissionsCall) Context(ctx context.Context) *TargetInstancesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TargetInstancesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetInstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetInstances.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetInstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.targetInstances.testIamPermissions", - // "parameterOrder": [ - // "project", - // "zone", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // 
"https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.targetPools.addHealthCheck": - -type TargetPoolsAddHealthCheckCall struct { - s *Service - project string - region string - targetPool string - targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// AddHealthCheck: Adds health check URLs to a target pool. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addHealthCheck -func (r *TargetPoolsService) AddHealthCheck(project string, region string, targetPool string, targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest) *TargetPoolsAddHealthCheckCall { - c := &TargetPoolsAddHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.targetPool = targetPool - c.targetpoolsaddhealthcheckrequest = targetpoolsaddhealthcheckrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetPoolsAddHealthCheckCall) Fields(s ...googleapi.Field) *TargetPoolsAddHealthCheckCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetPoolsAddHealthCheckCall) Context(ctx context.Context) *TargetPoolsAddHealthCheckCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetPoolsAddHealthCheckCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddhealthcheckrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetPools.addHealthCheck" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Adds health check URLs to a target pool.", - // "httpMethod": "POST", - // "id": "compute.targetPools.addHealthCheck", - // "parameterOrder": [ - // "project", - // "region", - // "targetPool" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "targetPool": { - // "description": "Name of the target pool to add a health check to.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", - // "request": { - // "$ref": "TargetPoolsAddHealthCheckRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetPools.addInstance": - -type TargetPoolsAddInstanceCall struct { - s *Service - project string - region string - targetPool string - targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// AddInstance: Adds an instance to a target pool. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addInstance -func (r *TargetPoolsService) AddInstance(project string, region string, targetPool string, targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest) *TargetPoolsAddInstanceCall { - c := &TargetPoolsAddInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.targetPool = targetPool - c.targetpoolsaddinstancerequest = targetpoolsaddinstancerequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetPoolsAddInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsAddInstanceCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *TargetPoolsAddInstanceCall) Context(ctx context.Context) *TargetPoolsAddInstanceCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetPoolsAddInstanceCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddinstancerequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addInstance") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetPools.addInstance" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Adds an instance to a target pool.", - // "httpMethod": "POST", - // "id": "compute.targetPools.addInstance", - // "parameterOrder": [ - // "project", - // "region", - // "targetPool" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "targetPool": { - // "description": "Name of the TargetPool resource to add instances to.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}/addInstance", - // "request": { - // "$ref": "TargetPoolsAddInstanceRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetPools.aggregatedList": - -type TargetPoolsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of target pools. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/aggregatedList -func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregatedListCall { - c := &TargetPoolsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. 
For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *TargetPoolsAggregatedListCall) Filter(filter string) *TargetPoolsAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *TargetPoolsAggregatedListCall) MaxResults(maxResults int64) *TargetPoolsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *TargetPoolsAggregatedListCall) OrderBy(orderBy string) *TargetPoolsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *TargetPoolsAggregatedListCall) PageToken(pageToken string) *TargetPoolsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetPoolsAggregatedListCall) Fields(s ...googleapi.Field) *TargetPoolsAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetPoolsAggregatedListCall) IfNoneMatch(entityTag string) *TargetPoolsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *TargetPoolsAggregatedListCall) Context(ctx context.Context) *TargetPoolsAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetPoolsAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetPoolsAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetPools") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetPools.aggregatedList" call. -// Exactly one of *TargetPoolAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TargetPoolAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetPoolAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetPoolAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an aggregated list of target pools.", - // "httpMethod": "GET", - // "id": "compute.targetPools.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/targetPools", - // "response": { - // "$ref": "TargetPoolAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *TargetPoolsAggregatedListCall) Pages(ctx context.Context, f func(*TargetPoolAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.targetPools.delete": - -type TargetPoolsDeleteCall struct { - s *Service - project string - region string - targetPool string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified target pool. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/delete -func (r *TargetPoolsService) Delete(project string, region string, targetPool string) *TargetPoolsDeleteCall { - c := &TargetPoolsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.targetPool = targetPool - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetPoolsDeleteCall) Fields(s ...googleapi.Field) *TargetPoolsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetPoolsDeleteCall) Context(ctx context.Context) *TargetPoolsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetPoolsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetPoolsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetPools.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified target pool.", - // "httpMethod": "DELETE", - // "id": "compute.targetPools.delete", - // "parameterOrder": [ - // "project", - // "region", - // "targetPool" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "targetPool": { - // "description": "Name of the TargetPool resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetPools.get": - -type TargetPoolsGetCall struct { - s *Service - project string - region string - targetPool string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified target pool. Get a list of available -// target pools by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/get -func (r *TargetPoolsService) Get(project string, region string, targetPool string) *TargetPoolsGetCall { - c := &TargetPoolsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.targetPool = targetPool - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetPoolsGetCall) Fields(s ...googleapi.Field) *TargetPoolsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetPoolsGetCall) IfNoneMatch(entityTag string) *TargetPoolsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *TargetPoolsGetCall) Context(ctx context.Context) *TargetPoolsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetPoolsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetPoolsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetPools.get" call. -// Exactly one of *TargetPool or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *TargetPool.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetPool{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified target pool. 
Get a list of available target pools by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.targetPools.get", - // "parameterOrder": [ - // "project", - // "region", - // "targetPool" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "targetPool": { - // "description": "Name of the TargetPool resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}", - // "response": { - // "$ref": "TargetPool" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.targetPools.getHealth": - -type TargetPoolsGetHealthCall struct { - s *Service - project string - region string - targetPool string - instancereference *InstanceReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// GetHealth: Gets the most recent health check results for each IP for -// the instance that is referenced by the given target pool. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/getHealth -func (r *TargetPoolsService) GetHealth(project string, region string, targetPool string, instancereference *InstanceReference) *TargetPoolsGetHealthCall { - c := &TargetPoolsGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.targetPool = targetPool - c.instancereference = instancereference - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetPoolsGetHealthCall) Fields(s ...googleapi.Field) *TargetPoolsGetHealthCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetPoolsGetHealthCall) Context(ctx context.Context) *TargetPoolsGetHealthCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TargetPoolsGetHealthCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetPoolsGetHealthCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancereference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/getHealth") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetPools.getHealth" call. -// Exactly one of *TargetPoolInstanceHealth or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TargetPoolInstanceHealth.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPoolInstanceHealth, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetPoolInstanceHealth{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool.", - // "httpMethod": "POST", - // "id": "compute.targetPools.getHealth", - // "parameterOrder": [ - // "project", - // "region", - // "targetPool" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "targetPool": { - // "description": "Name of the TargetPool resource to which the queried instance belongs.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}/getHealth", - // "request": { - // "$ref": "InstanceReference" - // }, - // "response": { - // "$ref": "TargetPoolInstanceHealth" - // }, - // 
"scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.targetPools.insert": - -type TargetPoolsInsertCall struct { - s *Service - project string - region string - targetpool *TargetPool - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a target pool in the specified project and region -// using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/insert -func (r *TargetPoolsService) Insert(project string, region string, targetpool *TargetPool) *TargetPoolsInsertCall { - c := &TargetPoolsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.targetpool = targetpool - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetPoolsInsertCall) Fields(s ...googleapi.Field) *TargetPoolsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetPoolsInsertCall) Context(ctx context.Context) *TargetPoolsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetPoolsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetPoolsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpool) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetPools.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a target pool in the specified project and region using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.targetPools.insert", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetPools", - // "request": { - // "$ref": "TargetPool" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetPools.list": - -type TargetPoolsListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of target pools available to the specified -// project and region. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/list -func (r *TargetPoolsService) List(project string, region string) *TargetPoolsListCall { - c := &TargetPoolsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. 
-// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *TargetPoolsListCall) Filter(filter string) *TargetPoolsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *TargetPoolsListCall) MaxResults(maxResults int64) *TargetPoolsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *TargetPoolsListCall) OrderBy(orderBy string) *TargetPoolsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *TargetPoolsListCall) PageToken(pageToken string) *TargetPoolsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetPoolsListCall) Fields(s ...googleapi.Field) *TargetPoolsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetPoolsListCall) IfNoneMatch(entityTag string) *TargetPoolsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetPoolsListCall) Context(ctx context.Context) *TargetPoolsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TargetPoolsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetPools.list" call. -// Exactly one of *TargetPoolList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *TargetPoolList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetPoolList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of target pools available to the specified project and region.", - // "httpMethod": "GET", - // "id": "compute.targetPools.list", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetPools", - // "response": { - // "$ref": "TargetPoolList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *TargetPoolsListCall) Pages(ctx context.Context, f func(*TargetPoolList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.targetPools.removeHealthCheck": - -type TargetPoolsRemoveHealthCheckCall struct { - s *Service - project string - region string - targetPool string - targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// RemoveHealthCheck: Removes health check URL from a target pool. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeHealthCheck -func (r *TargetPoolsService) RemoveHealthCheck(project string, region string, targetPool string, targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest) *TargetPoolsRemoveHealthCheckCall { - c := &TargetPoolsRemoveHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.targetPool = targetPool - c.targetpoolsremovehealthcheckrequest = targetpoolsremovehealthcheckrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetPoolsRemoveHealthCheckCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveHealthCheckCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetPoolsRemoveHealthCheckCall) Context(ctx context.Context) *TargetPoolsRemoveHealthCheckCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetPoolsRemoveHealthCheckCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetPoolsRemoveHealthCheckCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremovehealthcheckrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetPools.removeHealthCheck" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Removes health check URL from a target pool.", - // "httpMethod": "POST", - // "id": "compute.targetPools.removeHealthCheck", - // "parameterOrder": [ - // "project", - // "region", - // "targetPool" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "targetPool": { - // "description": "Name of the target pool to remove health checks from.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", - // "request": { - // "$ref": "TargetPoolsRemoveHealthCheckRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetPools.removeInstance": - -type TargetPoolsRemoveInstanceCall struct { - s *Service - project string - region string - targetPool string - targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// RemoveInstance: Removes instance URL from a target pool. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeInstance -func (r *TargetPoolsService) RemoveInstance(project string, region string, targetPool string, targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest) *TargetPoolsRemoveInstanceCall { - c := &TargetPoolsRemoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.targetPool = targetPool - c.targetpoolsremoveinstancerequest = targetpoolsremoveinstancerequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetPoolsRemoveInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveInstanceCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *TargetPoolsRemoveInstanceCall) Context(ctx context.Context) *TargetPoolsRemoveInstanceCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetPoolsRemoveInstanceCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremoveinstancerequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeInstance") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetPools.removeInstance" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Removes instance URL from a target pool.", - // "httpMethod": "POST", - // "id": "compute.targetPools.removeInstance", - // "parameterOrder": [ - // "project", - // "region", - // "targetPool" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "targetPool": { - // "description": "Name of the TargetPool resource to remove instances from.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}/removeInstance", - // "request": { - // "$ref": "TargetPoolsRemoveInstanceRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetPools.setBackup": - -type TargetPoolsSetBackupCall struct { - s *Service - project string - region string - targetPool string - targetreference *TargetReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetBackup: Changes a backup target pool's configurations. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/setBackup -func (r *TargetPoolsService) SetBackup(project string, region string, targetPool string, targetreference *TargetReference) *TargetPoolsSetBackupCall { - c := &TargetPoolsSetBackupCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.targetPool = targetPool - c.targetreference = targetreference - return c -} - -// FailoverRatio sets the optional parameter "failoverRatio": New -// failoverRatio value for the target pool. -func (c *TargetPoolsSetBackupCall) FailoverRatio(failoverRatio float64) *TargetPoolsSetBackupCall { - c.urlParams_.Set("failoverRatio", fmt.Sprint(failoverRatio)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetPoolsSetBackupCall) Fields(s ...googleapi.Field) *TargetPoolsSetBackupCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *TargetPoolsSetBackupCall) Context(ctx context.Context) *TargetPoolsSetBackupCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetPoolsSetBackupCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/setBackup") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetPools.setBackup" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Changes a backup target pool's configurations.", - // "httpMethod": "POST", - // "id": "compute.targetPools.setBackup", - // "parameterOrder": [ - // "project", - // "region", - // "targetPool" - // ], - // "parameters": { - // "failoverRatio": { - // "description": "New failoverRatio value for the target pool.", - // "format": "float", - // "location": "query", - // "type": "number" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "targetPool": { - // "description": "Name of the TargetPool resource to set a backup pool for.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}/setBackup", - // "request": { - // "$ref": "TargetReference" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetPools.testIamPermissions": - -type TargetPoolsTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *TargetPoolsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *TargetPoolsTestIamPermissionsCall { - c := &TargetPoolsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetPoolsTestIamPermissionsCall) Fields(s ...googleapi.Field) *TargetPoolsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *TargetPoolsTestIamPermissionsCall) Context(ctx context.Context) *TargetPoolsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetPoolsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetPoolsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetPools.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetPoolsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.targetPools.testIamPermissions", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetPools/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.targetSslProxies.delete": - -type TargetSslProxiesDeleteCall struct { - s *Service - project string - targetSslProxy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified TargetSslProxy resource. -func (r *TargetSslProxiesService) Delete(project string, targetSslProxy string) *TargetSslProxiesDeleteCall { - c := &TargetSslProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetSslProxy = targetSslProxy - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetSslProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetSslProxiesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetSslProxiesDeleteCall) Context(ctx context.Context) *TargetSslProxiesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TargetSslProxiesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetSslProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetSslProxy": c.targetSslProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetSslProxies.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetSslProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified TargetSslProxy resource.", - // "httpMethod": "DELETE", - // "id": "compute.targetSslProxies.delete", - // "parameterOrder": [ - // "project", - // "targetSslProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "targetSslProxy": { - // "description": "Name of the TargetSslProxy resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetSslProxies/{targetSslProxy}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetSslProxies.get": - -type TargetSslProxiesGetCall struct { - s *Service - project string - targetSslProxy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified TargetSslProxy resource. Get a list of -// available target SSL proxies by making a list() request. 
-func (r *TargetSslProxiesService) Get(project string, targetSslProxy string) *TargetSslProxiesGetCall { - c := &TargetSslProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetSslProxy = targetSslProxy - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetSslProxiesGetCall) Fields(s ...googleapi.Field) *TargetSslProxiesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetSslProxiesGetCall) IfNoneMatch(entityTag string) *TargetSslProxiesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetSslProxiesGetCall) Context(ctx context.Context) *TargetSslProxiesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetSslProxiesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetSslProxiesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetSslProxy": c.targetSslProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetSslProxies.get" call. -// Exactly one of *TargetSslProxy or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *TargetSslProxy.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslProxy, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetSslProxy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified TargetSslProxy resource. Get a list of available target SSL proxies by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.targetSslProxies.get", - // "parameterOrder": [ - // "project", - // "targetSslProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "targetSslProxy": { - // "description": "Name of the TargetSslProxy resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetSslProxies/{targetSslProxy}", - // "response": { - // "$ref": "TargetSslProxy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.targetSslProxies.insert": - -type TargetSslProxiesInsertCall struct { - s *Service - project string - targetsslproxy *TargetSslProxy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a TargetSslProxy resource in the specified project -// using the data included in the request. -func (r *TargetSslProxiesService) Insert(project string, targetsslproxy *TargetSslProxy) *TargetSslProxiesInsertCall { - c := &TargetSslProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetsslproxy = targetsslproxy - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetSslProxiesInsertCall) Fields(s ...googleapi.Field) *TargetSslProxiesInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetSslProxiesInsertCall) Context(ctx context.Context) *TargetSslProxiesInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TargetSslProxiesInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetSslProxiesInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetsslproxy) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetSslProxies.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetSslProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a TargetSslProxy resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.targetSslProxies.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetSslProxies", - // "request": { - // "$ref": "TargetSslProxy" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetSslProxies.list": - -type TargetSslProxiesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of TargetSslProxy resources available to the -// specified project. 
-func (r *TargetSslProxiesService) List(project string) *TargetSslProxiesListCall { - c := &TargetSslProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *TargetSslProxiesListCall) Filter(filter string) *TargetSslProxiesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *TargetSslProxiesListCall) MaxResults(maxResults int64) *TargetSslProxiesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *TargetSslProxiesListCall) OrderBy(orderBy string) *TargetSslProxiesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *TargetSslProxiesListCall) PageToken(pageToken string) *TargetSslProxiesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetSslProxiesListCall) Fields(s ...googleapi.Field) *TargetSslProxiesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetSslProxiesListCall) IfNoneMatch(entityTag string) *TargetSslProxiesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetSslProxiesListCall) Context(ctx context.Context) *TargetSslProxiesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetSslProxiesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetSslProxiesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetSslProxies.list" call. -// Exactly one of *TargetSslProxyList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetSslProxyList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslProxyList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetSslProxyList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of TargetSslProxy resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.targetSslProxies.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetSslProxies", - // "response": { - // "$ref": "TargetSslProxyList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *TargetSslProxiesListCall) Pages(ctx context.Context, f func(*TargetSslProxyList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.targetSslProxies.setBackendService": - -type TargetSslProxiesSetBackendServiceCall struct { - s *Service - project string - targetSslProxy string - targetsslproxiessetbackendservicerequest *TargetSslProxiesSetBackendServiceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetBackendService: Changes the BackendService for TargetSslProxy. -func (r *TargetSslProxiesService) SetBackendService(project string, targetSslProxy string, targetsslproxiessetbackendservicerequest *TargetSslProxiesSetBackendServiceRequest) *TargetSslProxiesSetBackendServiceCall { - c := &TargetSslProxiesSetBackendServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetSslProxy = targetSslProxy - c.targetsslproxiessetbackendservicerequest = targetsslproxiessetbackendservicerequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetSslProxiesSetBackendServiceCall) Fields(s ...googleapi.Field) *TargetSslProxiesSetBackendServiceCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetSslProxiesSetBackendServiceCall) Context(ctx context.Context) *TargetSslProxiesSetBackendServiceCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TargetSslProxiesSetBackendServiceCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetSslProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetsslproxiessetbackendservicerequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setBackendService") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetSslProxy": c.targetSslProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetSslProxies.setBackendService" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetSslProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Changes the BackendService for TargetSslProxy.", - // "httpMethod": "POST", - // "id": "compute.targetSslProxies.setBackendService", - // "parameterOrder": [ - // "project", - // "targetSslProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "targetSslProxy": { - // "description": "Name of the TargetSslProxy resource whose BackendService resource is to be set.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetSslProxies/{targetSslProxy}/setBackendService", - // "request": { - // "$ref": "TargetSslProxiesSetBackendServiceRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetSslProxies.setProxyHeader": - -type TargetSslProxiesSetProxyHeaderCall struct { - s 
*Service - project string - targetSslProxy string - targetsslproxiessetproxyheaderrequest *TargetSslProxiesSetProxyHeaderRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetProxyHeader: Changes the ProxyHeaderType for TargetSslProxy. -func (r *TargetSslProxiesService) SetProxyHeader(project string, targetSslProxy string, targetsslproxiessetproxyheaderrequest *TargetSslProxiesSetProxyHeaderRequest) *TargetSslProxiesSetProxyHeaderCall { - c := &TargetSslProxiesSetProxyHeaderCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetSslProxy = targetSslProxy - c.targetsslproxiessetproxyheaderrequest = targetsslproxiessetproxyheaderrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetSslProxiesSetProxyHeaderCall) Fields(s ...googleapi.Field) *TargetSslProxiesSetProxyHeaderCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetSslProxiesSetProxyHeaderCall) Context(ctx context.Context) *TargetSslProxiesSetProxyHeaderCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetSslProxiesSetProxyHeaderCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetSslProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetsslproxiessetproxyheaderrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setProxyHeader") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetSslProxy": c.targetSslProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetSslProxies.setProxyHeader" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetSslProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Changes the ProxyHeaderType for TargetSslProxy.", - // "httpMethod": "POST", - // "id": "compute.targetSslProxies.setProxyHeader", - // "parameterOrder": [ - // "project", - // "targetSslProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "targetSslProxy": { - // "description": "Name of the TargetSslProxy resource whose ProxyHeader is to be set.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetSslProxies/{targetSslProxy}/setProxyHeader", - // "request": { - // "$ref": "TargetSslProxiesSetProxyHeaderRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetSslProxies.setSslCertificates": - -type TargetSslProxiesSetSslCertificatesCall struct { - s *Service - project string - targetSslProxy string - targetsslproxiessetsslcertificatesrequest *TargetSslProxiesSetSslCertificatesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetSslCertificates: Changes SslCertificates for TargetSslProxy. -func (r *TargetSslProxiesService) SetSslCertificates(project string, targetSslProxy string, targetsslproxiessetsslcertificatesrequest *TargetSslProxiesSetSslCertificatesRequest) *TargetSslProxiesSetSslCertificatesCall { - c := &TargetSslProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetSslProxy = targetSslProxy - c.targetsslproxiessetsslcertificatesrequest = targetsslproxiessetsslcertificatesrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetSslProxiesSetSslCertificatesCall) Fields(s ...googleapi.Field) *TargetSslProxiesSetSslCertificatesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetSslProxiesSetSslCertificatesCall) Context(ctx context.Context) *TargetSslProxiesSetSslCertificatesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TargetSslProxiesSetSslCertificatesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetSslProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetsslproxiessetsslcertificatesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setSslCertificates") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetSslProxy": c.targetSslProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetSslProxies.setSslCertificates" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Changes SslCertificates for TargetSslProxy.", - // "httpMethod": "POST", - // "id": "compute.targetSslProxies.setSslCertificates", - // "parameterOrder": [ - // "project", - // "targetSslProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "targetSslProxy": { - // "description": "Name of the TargetSslProxy resource whose SslCertificate resource is to be set.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetSslProxies/{targetSslProxy}/setSslCertificates", - // "request": { - // "$ref": "TargetSslProxiesSetSslCertificatesRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetSslProxies.testIamPermissions": - -type TargetSslProxiesTestIamPermissionsCall 
struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *TargetSslProxiesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *TargetSslProxiesTestIamPermissionsCall { - c := &TargetSslProxiesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetSslProxiesTestIamPermissionsCall) Fields(s ...googleapi.Field) *TargetSslProxiesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetSslProxiesTestIamPermissionsCall) Context(ctx context.Context) *TargetSslProxiesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetSslProxiesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetSslProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetSslProxies.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetSslProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.targetSslProxies.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetSslProxies/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.targetTcpProxies.delete": - -type TargetTcpProxiesDeleteCall struct { - s *Service - project string - targetTcpProxy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified TargetTcpProxy resource. -func (r *TargetTcpProxiesService) Delete(project string, targetTcpProxy string) *TargetTcpProxiesDeleteCall { - c := &TargetTcpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetTcpProxy = targetTcpProxy - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetTcpProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetTcpProxiesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetTcpProxiesDeleteCall) Context(ctx context.Context) *TargetTcpProxiesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TargetTcpProxiesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetTcpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetTcpProxy": c.targetTcpProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetTcpProxies.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified TargetTcpProxy resource.", - // "httpMethod": "DELETE", - // "id": "compute.targetTcpProxies.delete", - // "parameterOrder": [ - // "project", - // "targetTcpProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "targetTcpProxy": { - // "description": "Name of the TargetTcpProxy resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetTcpProxies/{targetTcpProxy}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetTcpProxies.get": - -type TargetTcpProxiesGetCall struct { - s *Service - project string - targetTcpProxy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified TargetTcpProxy resource. Get a list of -// available target TCP proxies by making a list() request. 
-func (r *TargetTcpProxiesService) Get(project string, targetTcpProxy string) *TargetTcpProxiesGetCall { - c := &TargetTcpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetTcpProxy = targetTcpProxy - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetTcpProxiesGetCall) Fields(s ...googleapi.Field) *TargetTcpProxiesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetTcpProxiesGetCall) IfNoneMatch(entityTag string) *TargetTcpProxiesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetTcpProxiesGetCall) Context(ctx context.Context) *TargetTcpProxiesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetTcpProxiesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetTcpProxiesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetTcpProxy": c.targetTcpProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetTcpProxies.get" call. -// Exactly one of *TargetTcpProxy or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *TargetTcpProxy.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpProxy, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetTcpProxy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified TargetTcpProxy resource. Get a list of available target TCP proxies by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.targetTcpProxies.get", - // "parameterOrder": [ - // "project", - // "targetTcpProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "targetTcpProxy": { - // "description": "Name of the TargetTcpProxy resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetTcpProxies/{targetTcpProxy}", - // "response": { - // "$ref": "TargetTcpProxy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.targetTcpProxies.insert": - -type TargetTcpProxiesInsertCall struct { - s *Service - project string - targettcpproxy *TargetTcpProxy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a TargetTcpProxy resource in the specified project -// using the data included in the request. -func (r *TargetTcpProxiesService) Insert(project string, targettcpproxy *TargetTcpProxy) *TargetTcpProxiesInsertCall { - c := &TargetTcpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targettcpproxy = targettcpproxy - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetTcpProxiesInsertCall) Fields(s ...googleapi.Field) *TargetTcpProxiesInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetTcpProxiesInsertCall) Context(ctx context.Context) *TargetTcpProxiesInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TargetTcpProxiesInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetTcpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targettcpproxy) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetTcpProxies.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetTcpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a TargetTcpProxy resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.targetTcpProxies.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetTcpProxies", - // "request": { - // "$ref": "TargetTcpProxy" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetTcpProxies.list": - -type TargetTcpProxiesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of TargetTcpProxy resources available to the -// specified project. 
-func (r *TargetTcpProxiesService) List(project string) *TargetTcpProxiesListCall { - c := &TargetTcpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *TargetTcpProxiesListCall) Filter(filter string) *TargetTcpProxiesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *TargetTcpProxiesListCall) MaxResults(maxResults int64) *TargetTcpProxiesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *TargetTcpProxiesListCall) OrderBy(orderBy string) *TargetTcpProxiesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *TargetTcpProxiesListCall) PageToken(pageToken string) *TargetTcpProxiesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetTcpProxiesListCall) Fields(s ...googleapi.Field) *TargetTcpProxiesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetTcpProxiesListCall) IfNoneMatch(entityTag string) *TargetTcpProxiesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetTcpProxiesListCall) Context(ctx context.Context) *TargetTcpProxiesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetTcpProxiesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetTcpProxiesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetTcpProxies.list" call. -// Exactly one of *TargetTcpProxyList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetTcpProxyList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpProxyList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetTcpProxyList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of TargetTcpProxy resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.targetTcpProxies.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetTcpProxies", - // "response": { - // "$ref": "TargetTcpProxyList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *TargetTcpProxiesListCall) Pages(ctx context.Context, f func(*TargetTcpProxyList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.targetTcpProxies.setBackendService": - -type TargetTcpProxiesSetBackendServiceCall struct { - s *Service - project string - targetTcpProxy string - targettcpproxiessetbackendservicerequest *TargetTcpProxiesSetBackendServiceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetBackendService: Changes the BackendService for TargetTcpProxy. -func (r *TargetTcpProxiesService) SetBackendService(project string, targetTcpProxy string, targettcpproxiessetbackendservicerequest *TargetTcpProxiesSetBackendServiceRequest) *TargetTcpProxiesSetBackendServiceCall { - c := &TargetTcpProxiesSetBackendServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetTcpProxy = targetTcpProxy - c.targettcpproxiessetbackendservicerequest = targettcpproxiessetbackendservicerequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetTcpProxiesSetBackendServiceCall) Fields(s ...googleapi.Field) *TargetTcpProxiesSetBackendServiceCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetTcpProxiesSetBackendServiceCall) Context(ctx context.Context) *TargetTcpProxiesSetBackendServiceCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TargetTcpProxiesSetBackendServiceCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetTcpProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targettcpproxiessetbackendservicerequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}/setBackendService") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetTcpProxy": c.targetTcpProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetTcpProxies.setBackendService" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetTcpProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Changes the BackendService for TargetTcpProxy.", - // "httpMethod": "POST", - // "id": "compute.targetTcpProxies.setBackendService", - // "parameterOrder": [ - // "project", - // "targetTcpProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "targetTcpProxy": { - // "description": "Name of the TargetTcpProxy resource whose BackendService resource is to be set.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetTcpProxies/{targetTcpProxy}/setBackendService", - // "request": { - // "$ref": "TargetTcpProxiesSetBackendServiceRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetTcpProxies.setProxyHeader": - -type TargetTcpProxiesSetProxyHeaderCall struct { - s 
*Service - project string - targetTcpProxy string - targettcpproxiessetproxyheaderrequest *TargetTcpProxiesSetProxyHeaderRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetProxyHeader: Changes the ProxyHeaderType for TargetTcpProxy. -func (r *TargetTcpProxiesService) SetProxyHeader(project string, targetTcpProxy string, targettcpproxiessetproxyheaderrequest *TargetTcpProxiesSetProxyHeaderRequest) *TargetTcpProxiesSetProxyHeaderCall { - c := &TargetTcpProxiesSetProxyHeaderCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetTcpProxy = targetTcpProxy - c.targettcpproxiessetproxyheaderrequest = targettcpproxiessetproxyheaderrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetTcpProxiesSetProxyHeaderCall) Fields(s ...googleapi.Field) *TargetTcpProxiesSetProxyHeaderCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetTcpProxiesSetProxyHeaderCall) Context(ctx context.Context) *TargetTcpProxiesSetProxyHeaderCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetTcpProxiesSetProxyHeaderCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetTcpProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targettcpproxiessetproxyheaderrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}/setProxyHeader") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetTcpProxy": c.targetTcpProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetTcpProxies.setProxyHeader" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetTcpProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Changes the ProxyHeaderType for TargetTcpProxy.", - // "httpMethod": "POST", - // "id": "compute.targetTcpProxies.setProxyHeader", - // "parameterOrder": [ - // "project", - // "targetTcpProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "targetTcpProxy": { - // "description": "Name of the TargetTcpProxy resource whose ProxyHeader is to be set.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/targetTcpProxies/{targetTcpProxy}/setProxyHeader", - // "request": { - // "$ref": "TargetTcpProxiesSetProxyHeaderRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetVpnGateways.aggregatedList": - -type TargetVpnGatewaysAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of target VPN gateways. -func (r *TargetVpnGatewaysService) AggregatedList(project string) *TargetVpnGatewaysAggregatedListCall { - c := &TargetVpnGatewaysAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. 
-// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *TargetVpnGatewaysAggregatedListCall) Filter(filter string) *TargetVpnGatewaysAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *TargetVpnGatewaysAggregatedListCall) MaxResults(maxResults int64) *TargetVpnGatewaysAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *TargetVpnGatewaysAggregatedListCall) OrderBy(orderBy string) *TargetVpnGatewaysAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *TargetVpnGatewaysAggregatedListCall) PageToken(pageToken string) *TargetVpnGatewaysAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetVpnGatewaysAggregatedListCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetVpnGatewaysAggregatedListCall) IfNoneMatch(entityTag string) *TargetVpnGatewaysAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetVpnGatewaysAggregatedListCall) Context(ctx context.Context) *TargetVpnGatewaysAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *TargetVpnGatewaysAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetVpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetVpnGateways") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetVpnGateways.aggregatedList" call. -// Exactly one of *TargetVpnGatewayAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *TargetVpnGatewayAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetVpnGatewayAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetVpnGatewayAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an aggregated list of target VPN gateways.", - // "httpMethod": "GET", - // "id": "compute.targetVpnGateways.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/targetVpnGateways", - // "response": { - // "$ref": "TargetVpnGatewayAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *TargetVpnGatewaysAggregatedListCall) Pages(ctx context.Context, f func(*TargetVpnGatewayAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.targetVpnGateways.delete": - -type TargetVpnGatewaysDeleteCall struct { - s *Service - project string - region string - targetVpnGateway string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified target VPN gateway. 
-func (r *TargetVpnGatewaysService) Delete(project string, region string, targetVpnGateway string) *TargetVpnGatewaysDeleteCall { - c := &TargetVpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.targetVpnGateway = targetVpnGateway - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetVpnGatewaysDeleteCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetVpnGatewaysDeleteCall) Context(ctx context.Context) *TargetVpnGatewaysDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetVpnGatewaysDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetVpnGateway": c.targetVpnGateway, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetVpnGateways.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified target VPN gateway.", - // "httpMethod": "DELETE", - // "id": "compute.targetVpnGateways.delete", - // "parameterOrder": [ - // "project", - // "region", - // "targetVpnGateway" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "targetVpnGateway": { - // "description": "Name of the target VPN gateway to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetVpnGateways.get": - -type TargetVpnGatewaysGetCall struct { - s *Service - project string - region string - targetVpnGateway string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified target VPN gateway. Get a list of -// available target VPN gateways by making a list() request. -func (r *TargetVpnGatewaysService) Get(project string, region string, targetVpnGateway string) *TargetVpnGatewaysGetCall { - c := &TargetVpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.targetVpnGateway = targetVpnGateway - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetVpnGatewaysGetCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetVpnGatewaysGetCall) IfNoneMatch(entityTag string) *TargetVpnGatewaysGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *TargetVpnGatewaysGetCall) Context(ctx context.Context) *TargetVpnGatewaysGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetVpnGatewaysGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetVpnGateway": c.targetVpnGateway, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetVpnGateways.get" call. -// Exactly one of *TargetVpnGateway or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetVpnGateway.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnGateway, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetVpnGateway{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified target VPN gateway. 
Get a list of available target VPN gateways by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.targetVpnGateways.get", - // "parameterOrder": [ - // "project", - // "region", - // "targetVpnGateway" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "targetVpnGateway": { - // "description": "Name of the target VPN gateway to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", - // "response": { - // "$ref": "TargetVpnGateway" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.targetVpnGateways.insert": - -type TargetVpnGatewaysInsertCall struct { - s *Service - project string - region string - targetvpngateway *TargetVpnGateway - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a target VPN gateway in the specified project and -// region using the data included in the request. -func (r *TargetVpnGatewaysService) Insert(project string, region string, targetvpngateway *TargetVpnGateway) *TargetVpnGatewaysInsertCall { - c := &TargetVpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.targetvpngateway = targetvpngateway - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetVpnGatewaysInsertCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetVpnGatewaysInsertCall) Context(ctx context.Context) *TargetVpnGatewaysInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetVpnGatewaysInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetvpngateway) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetVpnGateways.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a target VPN gateway in the specified project and region using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.targetVpnGateways.insert", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetVpnGateways", - // "request": { - // "$ref": "TargetVpnGateway" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetVpnGateways.list": - -type TargetVpnGatewaysListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of target VPN gateways available to the -// specified project and region. -func (r *TargetVpnGatewaysService) List(project string, region string) *TargetVpnGatewaysListCall { - c := &TargetVpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). 
The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *TargetVpnGatewaysListCall) Filter(filter string) *TargetVpnGatewaysListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *TargetVpnGatewaysListCall) MaxResults(maxResults int64) *TargetVpnGatewaysListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *TargetVpnGatewaysListCall) OrderBy(orderBy string) *TargetVpnGatewaysListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *TargetVpnGatewaysListCall) PageToken(pageToken string) *TargetVpnGatewaysListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetVpnGatewaysListCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetVpnGatewaysListCall) IfNoneMatch(entityTag string) *TargetVpnGatewaysListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetVpnGatewaysListCall) Context(ctx context.Context) *TargetVpnGatewaysListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetVpnGatewaysListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetVpnGateways.list" call. -// Exactly one of *TargetVpnGatewayList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetVpnGatewayList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpnGatewayList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetVpnGatewayList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of target VPN gateways available to the specified project and region.", - // "httpMethod": "GET", - // "id": "compute.targetVpnGateways.list", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. 
The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetVpnGateways", - // "response": { - // "$ref": "TargetVpnGatewayList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *TargetVpnGatewaysListCall) Pages(ctx context.Context, f func(*TargetVpnGatewayList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.targetVpnGateways.testIamPermissions": - -type TargetVpnGatewaysTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *TargetVpnGatewaysService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *TargetVpnGatewaysTestIamPermissionsCall { - c := &TargetVpnGatewaysTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetVpnGatewaysTestIamPermissionsCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetVpnGatewaysTestIamPermissionsCall) Context(ctx context.Context) *TargetVpnGatewaysTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetVpnGatewaysTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetVpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetVpnGateways.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *TargetVpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.targetVpnGateways.testIamPermissions", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetVpnGateways/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.urlMaps.delete": - -type UrlMapsDeleteCall struct { - s *Service - project string - urlMap string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified UrlMap resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/delete -func (r *UrlMapsService) Delete(project string, urlMap string) *UrlMapsDeleteCall { - c := &UrlMapsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.urlMap = urlMap - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *UrlMapsDeleteCall) Fields(s ...googleapi.Field) *UrlMapsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *UrlMapsDeleteCall) Context(ctx context.Context) *UrlMapsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *UrlMapsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *UrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "urlMap": c.urlMap, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.urlMaps.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified UrlMap resource.", - // "httpMethod": "DELETE", - // "id": "compute.urlMaps.delete", - // "parameterOrder": [ - // "project", - // "urlMap" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "urlMap": { - // "description": "Name of the UrlMap resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/urlMaps/{urlMap}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.urlMaps.get": - -type UrlMapsGetCall struct { - s *Service - project string - urlMap string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified UrlMap resource. Get a list of available -// URL maps by making a list() request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/get -func (r *UrlMapsService) Get(project string, urlMap string) *UrlMapsGetCall { - c := &UrlMapsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.urlMap = urlMap - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *UrlMapsGetCall) Fields(s ...googleapi.Field) *UrlMapsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *UrlMapsGetCall) IfNoneMatch(entityTag string) *UrlMapsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *UrlMapsGetCall) Context(ctx context.Context) *UrlMapsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *UrlMapsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *UrlMapsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "urlMap": c.urlMap, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.urlMaps.get" call. -// Exactly one of *UrlMap or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *UrlMap.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &UrlMap{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified UrlMap resource. 
Get a list of available URL maps by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.urlMaps.get", - // "parameterOrder": [ - // "project", - // "urlMap" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "urlMap": { - // "description": "Name of the UrlMap resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/urlMaps/{urlMap}", - // "response": { - // "$ref": "UrlMap" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.urlMaps.insert": - -type UrlMapsInsertCall struct { - s *Service - project string - urlmap *UrlMap - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a UrlMap resource in the specified project using the -// data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/insert -func (r *UrlMapsService) Insert(project string, urlmap *UrlMap) *UrlMapsInsertCall { - c := &UrlMapsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.urlmap = urlmap - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *UrlMapsInsertCall) Fields(s ...googleapi.Field) *UrlMapsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *UrlMapsInsertCall) Context(ctx context.Context) *UrlMapsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *UrlMapsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *UrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.urlMaps.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *UrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a UrlMap resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.urlMaps.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/urlMaps", - // "request": { - // "$ref": "UrlMap" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.urlMaps.invalidateCache": - -type UrlMapsInvalidateCacheCall struct { - s *Service - project string - urlMap string - cacheinvalidationrule *CacheInvalidationRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// InvalidateCache: Initiates a cache invalidation operation, -// invalidating the specified path, scoped to the specified UrlMap. -func (r *UrlMapsService) InvalidateCache(project string, urlMap string, cacheinvalidationrule *CacheInvalidationRule) *UrlMapsInvalidateCacheCall { - c := &UrlMapsInvalidateCacheCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.urlMap = urlMap - c.cacheinvalidationrule = cacheinvalidationrule - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *UrlMapsInvalidateCacheCall) Fields(s ...googleapi.Field) *UrlMapsInvalidateCacheCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *UrlMapsInvalidateCacheCall) Context(ctx context.Context) *UrlMapsInvalidateCacheCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *UrlMapsInvalidateCacheCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *UrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.cacheinvalidationrule) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}/invalidateCache") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "urlMap": c.urlMap, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.urlMaps.invalidateCache" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.", - // "httpMethod": "POST", - // "id": "compute.urlMaps.invalidateCache", - // "parameterOrder": [ - // "project", - // "urlMap" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "urlMap": { - // "description": "Name of the UrlMap scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/urlMaps/{urlMap}/invalidateCache", - // "request": { - // "$ref": "CacheInvalidationRule" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.urlMaps.list": - -type UrlMapsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of UrlMap resources 
available to the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/list -func (r *UrlMapsService) List(project string) *UrlMapsListCall { - c := &UrlMapsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *UrlMapsListCall) Filter(filter string) *UrlMapsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *UrlMapsListCall) MaxResults(maxResults int64) *UrlMapsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *UrlMapsListCall) OrderBy(orderBy string) *UrlMapsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *UrlMapsListCall) PageToken(pageToken string) *UrlMapsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *UrlMapsListCall) Fields(s ...googleapi.Field) *UrlMapsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *UrlMapsListCall) IfNoneMatch(entityTag string) *UrlMapsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *UrlMapsListCall) Context(ctx context.Context) *UrlMapsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *UrlMapsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *UrlMapsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.urlMaps.list" call. -// Exactly one of *UrlMapList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *UrlMapList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &UrlMapList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of UrlMap resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.urlMaps.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/urlMaps", - // "response": { - // "$ref": "UrlMapList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *UrlMapsListCall) Pages(ctx context.Context, f func(*UrlMapList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.urlMaps.patch": - -type UrlMapsPatchCall struct { - s *Service - project string - urlMap string - urlmap *UrlMap - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Patches the specified UrlMap resource with the data included -// in the request. This method supports patch semantics. -// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/patch -func (r *UrlMapsService) Patch(project string, urlMap string, urlmap *UrlMap) *UrlMapsPatchCall { - c := &UrlMapsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.urlMap = urlMap - c.urlmap = urlmap - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *UrlMapsPatchCall) Fields(s ...googleapi.Field) *UrlMapsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *UrlMapsPatchCall) Context(ctx context.Context) *UrlMapsPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *UrlMapsPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *UrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "urlMap": c.urlMap, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.urlMaps.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Patches the specified UrlMap resource with the data included in the request. This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "compute.urlMaps.patch", - // "parameterOrder": [ - // "project", - // "urlMap" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "urlMap": { - // "description": "Name of the UrlMap resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/urlMaps/{urlMap}", - // "request": { - // "$ref": "UrlMap" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.urlMaps.testIamPermissions": - -type UrlMapsTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *UrlMapsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *UrlMapsTestIamPermissionsCall { - c := &UrlMapsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *UrlMapsTestIamPermissionsCall) Fields(s ...googleapi.Field) *UrlMapsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *UrlMapsTestIamPermissionsCall) Context(ctx context.Context) *UrlMapsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *UrlMapsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *UrlMapsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.urlMaps.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *UrlMapsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.urlMaps.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/urlMaps/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.urlMaps.update": - -type UrlMapsUpdateCall struct { - s *Service - project string - urlMap string - urlmap *UrlMap - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates the specified UrlMap resource with the data included -// in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/update -func (r *UrlMapsService) Update(project string, urlMap string, urlmap *UrlMap) *UrlMapsUpdateCall { - c := &UrlMapsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.urlMap = urlMap - c.urlmap = urlmap - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *UrlMapsUpdateCall) Fields(s ...googleapi.Field) *UrlMapsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *UrlMapsUpdateCall) Context(ctx context.Context) *UrlMapsUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *UrlMapsUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *UrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "urlMap": c.urlMap, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.urlMaps.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates the specified UrlMap resource with the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.urlMaps.update", - // "parameterOrder": [ - // "project", - // "urlMap" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "urlMap": { - // "description": "Name of the UrlMap resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/urlMaps/{urlMap}", - // "request": { - // "$ref": "UrlMap" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.urlMaps.validate": - -type UrlMapsValidateCall struct { - s *Service - project string - urlMap string - urlmapsvalidaterequest *UrlMapsValidateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Validate: Runs static validation for the UrlMap. In particular, the -// tests of the provided UrlMap will be run. 
Calling this method does -// NOT create the UrlMap. -// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/validate -func (r *UrlMapsService) Validate(project string, urlMap string, urlmapsvalidaterequest *UrlMapsValidateRequest) *UrlMapsValidateCall { - c := &UrlMapsValidateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.urlMap = urlMap - c.urlmapsvalidaterequest = urlmapsvalidaterequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *UrlMapsValidateCall) Fields(s ...googleapi.Field) *UrlMapsValidateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *UrlMapsValidateCall) Context(ctx context.Context) *UrlMapsValidateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *UrlMapsValidateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *UrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapsvalidaterequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}/validate") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "urlMap": c.urlMap, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.urlMaps.validate" call. -// Exactly one of *UrlMapsValidateResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *UrlMapsValidateResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidateResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &UrlMapsValidateResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. 
Calling this method does NOT create the UrlMap.", - // "httpMethod": "POST", - // "id": "compute.urlMaps.validate", - // "parameterOrder": [ - // "project", - // "urlMap" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "urlMap": { - // "description": "Name of the UrlMap resource to be validated as.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/urlMaps/{urlMap}/validate", - // "request": { - // "$ref": "UrlMapsValidateRequest" - // }, - // "response": { - // "$ref": "UrlMapsValidateResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.vpnTunnels.aggregatedList": - -type VpnTunnelsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of VPN tunnels. -func (r *VpnTunnelsService) AggregatedList(project string) *VpnTunnelsAggregatedListCall { - c := &VpnTunnelsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *VpnTunnelsAggregatedListCall) Filter(filter string) *VpnTunnelsAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
-// (Default: 500) -func (c *VpnTunnelsAggregatedListCall) MaxResults(maxResults int64) *VpnTunnelsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *VpnTunnelsAggregatedListCall) OrderBy(orderBy string) *VpnTunnelsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *VpnTunnelsAggregatedListCall) PageToken(pageToken string) *VpnTunnelsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *VpnTunnelsAggregatedListCall) Fields(s ...googleapi.Field) *VpnTunnelsAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *VpnTunnelsAggregatedListCall) IfNoneMatch(entityTag string) *VpnTunnelsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *VpnTunnelsAggregatedListCall) Context(ctx context.Context) *VpnTunnelsAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *VpnTunnelsAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *VpnTunnelsAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/vpnTunnels") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.vpnTunnels.aggregatedList" call. -// Exactly one of *VpnTunnelAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. 
Response headers are in either -// *VpnTunnelAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &VpnTunnelAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an aggregated list of VPN tunnels.", - // "httpMethod": "GET", - // "id": "compute.vpnTunnels.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/vpnTunnels", - // "response": { - // "$ref": "VpnTunnelAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *VpnTunnelsAggregatedListCall) Pages(ctx context.Context, f func(*VpnTunnelAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.vpnTunnels.delete": - -type VpnTunnelsDeleteCall struct { - s *Service - project string - region string - vpnTunnel string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified VpnTunnel resource. -func (r *VpnTunnelsService) Delete(project string, region string, vpnTunnel string) *VpnTunnelsDeleteCall { - c := &VpnTunnelsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.vpnTunnel = vpnTunnel - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *VpnTunnelsDeleteCall) Fields(s ...googleapi.Field) *VpnTunnelsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *VpnTunnelsDeleteCall) Context(ctx context.Context) *VpnTunnelsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *VpnTunnelsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *VpnTunnelsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{vpnTunnel}") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "vpnTunnel": c.vpnTunnel, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.vpnTunnels.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *VpnTunnelsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified VpnTunnel resource.", - // "httpMethod": "DELETE", - // "id": "compute.vpnTunnels.delete", - // "parameterOrder": [ - // "project", - // "region", - // "vpnTunnel" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "vpnTunnel": { - // "description": "Name of the VpnTunnel resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.vpnTunnels.get": - -type VpnTunnelsGetCall struct { - s *Service - project string - region string - vpnTunnel string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified VpnTunnel resource. Get a list of -// available VPN tunnels by making a list() request. -func (r *VpnTunnelsService) Get(project string, region string, vpnTunnel string) *VpnTunnelsGetCall { - c := &VpnTunnelsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.vpnTunnel = vpnTunnel - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *VpnTunnelsGetCall) Fields(s ...googleapi.Field) *VpnTunnelsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *VpnTunnelsGetCall) IfNoneMatch(entityTag string) *VpnTunnelsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *VpnTunnelsGetCall) Context(ctx context.Context) *VpnTunnelsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *VpnTunnelsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *VpnTunnelsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{vpnTunnel}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "vpnTunnel": c.vpnTunnel, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.vpnTunnels.get" call. -// Exactly one of *VpnTunnel or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *VpnTunnel.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *VpnTunnelsGetCall) Do(opts ...googleapi.CallOption) (*VpnTunnel, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &VpnTunnel{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified VpnTunnel resource. 
Get a list of available VPN tunnels by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.vpnTunnels.get", - // "parameterOrder": [ - // "project", - // "region", - // "vpnTunnel" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "vpnTunnel": { - // "description": "Name of the VpnTunnel resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}", - // "response": { - // "$ref": "VpnTunnel" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.vpnTunnels.insert": - -type VpnTunnelsInsertCall struct { - s *Service - project string - region string - vpntunnel *VpnTunnel - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a VpnTunnel resource in the specified project and -// region using the data included in the request. -func (r *VpnTunnelsService) Insert(project string, region string, vpntunnel *VpnTunnel) *VpnTunnelsInsertCall { - c := &VpnTunnelsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.vpntunnel = vpntunnel - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *VpnTunnelsInsertCall) Fields(s ...googleapi.Field) *VpnTunnelsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *VpnTunnelsInsertCall) Context(ctx context.Context) *VpnTunnelsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *VpnTunnelsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *VpnTunnelsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.vpntunnel) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.vpnTunnels.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *VpnTunnelsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.vpnTunnels.insert", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/vpnTunnels", - // "request": { - // "$ref": "VpnTunnel" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.vpnTunnels.list": - -type VpnTunnelsListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of VpnTunnel resources contained in the -// specified project and region. -func (r *VpnTunnelsService) List(project string, region string) *VpnTunnelsListCall { - c := &VpnTunnelsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). 
The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *VpnTunnelsListCall) Filter(filter string) *VpnTunnelsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *VpnTunnelsListCall) MaxResults(maxResults int64) *VpnTunnelsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *VpnTunnelsListCall) OrderBy(orderBy string) *VpnTunnelsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *VpnTunnelsListCall) PageToken(pageToken string) *VpnTunnelsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *VpnTunnelsListCall) Fields(s ...googleapi.Field) *VpnTunnelsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
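// A minimal sketch of combining the Filter, MaxResults and IfNoneMatch options
// documented above. It assumes svc is a *compute.Service built for this vendored
// package, and that cachedETag/cachedList were saved from an earlier response;
// all of those names are illustrative, not part of this file.
//
//	call := svc.VpnTunnels.List("my-project", "us-central1").
//		Filter("status eq ESTABLISHED").
//		MaxResults(100).
//		IfNoneMatch(cachedETag)
//	list, err := call.Do()
//	if googleapi.IsNotModified(err) {
//		list = cachedList // server copy unchanged since the cached ETag
//	} else if err != nil {
//		return err
//	}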
-func (c *VpnTunnelsListCall) IfNoneMatch(entityTag string) *VpnTunnelsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *VpnTunnelsListCall) Context(ctx context.Context) *VpnTunnelsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *VpnTunnelsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *VpnTunnelsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.vpnTunnels.list" call. -// Exactly one of *VpnTunnelList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *VpnTunnelList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &VpnTunnelList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of VpnTunnel resources contained in the specified project and region.", - // "httpMethod": "GET", - // "id": "compute.vpnTunnels.list", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/vpnTunnels", - // "response": { - // "$ref": "VpnTunnelList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *VpnTunnelsListCall) Pages(ctx context.Context, f func(*VpnTunnelList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.vpnTunnels.testIamPermissions": - -type VpnTunnelsTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *VpnTunnelsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *VpnTunnelsTestIamPermissionsCall { - c := &VpnTunnelsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *VpnTunnelsTestIamPermissionsCall) Fields(s ...googleapi.Field) *VpnTunnelsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *VpnTunnelsTestIamPermissionsCall) Context(ctx context.Context) *VpnTunnelsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *VpnTunnelsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *VpnTunnelsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.vpnTunnels.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
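// A short usage sketch for the Pages helper defined above: it repeatedly calls
// Do and passes each page to the callback until NextPageToken is empty. The
// svc and ctx values are assumed to exist, and the package is assumed to be
// imported as compute; Items/Name/Status follow the generated VpnTunnel types.
//
//	err := svc.VpnTunnels.List("my-project", "us-central1").
//		Pages(ctx, func(page *compute.VpnTunnelList) error {
//			for _, t := range page.Items {
//				fmt.Printf("%s -> %s\n", t.Name, t.Status)
//			}
//			return nil
//		})
//	if err != nil {
//		return err
//	}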
-func (c *VpnTunnelsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.vpnTunnels.testIamPermissions", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/vpnTunnels/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.zoneOperations.delete": - -type ZoneOperationsDeleteCall struct { - s *Service - project string - zone string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified zone-specific Operations resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/zoneOperations/delete -func (r *ZoneOperationsService) Delete(project string, zone string, operation string) *ZoneOperationsDeleteCall { - c := &ZoneOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.operation = operation - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ZoneOperationsDeleteCall) Fields(s ...googleapi.Field) *ZoneOperationsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *ZoneOperationsDeleteCall) Context(ctx context.Context) *ZoneOperationsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ZoneOperationsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ZoneOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "operation": c.operation, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.zoneOperations.delete" call. -func (c *ZoneOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Deletes the specified zone-specific Operations resource.", - // "httpMethod": "DELETE", - // "id": "compute.zoneOperations.delete", - // "parameterOrder": [ - // "project", - // "zone", - // "operation" - // ], - // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/operations/{operation}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.zoneOperations.get": - -type ZoneOperationsGetCall struct { - s *Service - project string - zone string - operation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Retrieves the specified zone-specific Operations resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/zoneOperations/get -func (r *ZoneOperationsService) Get(project string, zone string, operation string) *ZoneOperationsGetCall { - c := &ZoneOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.operation = operation - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ZoneOperationsGetCall) Fields(s ...googleapi.Field) *ZoneOperationsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ZoneOperationsGetCall) IfNoneMatch(entityTag string) *ZoneOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ZoneOperationsGetCall) Context(ctx context.Context) *ZoneOperationsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ZoneOperationsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "operation": c.operation, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.zoneOperations.get" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the specified zone-specific Operations resource.", - // "httpMethod": "GET", - // "id": "compute.zoneOperations.get", - // "parameterOrder": [ - // "project", - // "zone", - // "operation" - // ], - // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/operations/{operation}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.zoneOperations.list": - -type ZoneOperationsListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of Operation resources contained within the -// specified zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/zoneOperations/list -func (r *ZoneOperationsService) List(project string, zone string) *ZoneOperationsListCall { - c := &ZoneOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. 
For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *ZoneOperationsListCall) Filter(filter string) *ZoneOperationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *ZoneOperationsListCall) MaxResults(maxResults int64) *ZoneOperationsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *ZoneOperationsListCall) OrderBy(orderBy string) *ZoneOperationsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *ZoneOperationsListCall) PageToken(pageToken string) *ZoneOperationsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ZoneOperationsListCall) Fields(s ...googleapi.Field) *ZoneOperationsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ZoneOperationsListCall) IfNoneMatch(entityTag string) *ZoneOperationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ZoneOperationsListCall) Context(ctx context.Context) *ZoneOperationsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
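// A hedged sketch of the usual polling loop built on the ZoneOperations.Get call
// defined above: an insert-style call returns an *Operation whose Name is
// re-fetched until Status reaches "DONE". op, svc, the project and zone names
// are illustrative placeholders.
//
//	for op.Status != "DONE" {
//		time.Sleep(2 * time.Second)
//		op, err = svc.ZoneOperations.Get("my-project", "us-central1-a", op.Name).Do()
//		if err != nil {
//			return err
//		}
//	}
//	if op.Error != nil {
//		return fmt.Errorf("operation %s failed: %+v", op.Name, op.Error.Errors)
//	}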
-func (c *ZoneOperationsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.zoneOperations.list" call. -// Exactly one of *OperationList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *OperationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &OperationList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of Operation resources contained within the specified zone.", - // "httpMethod": "GET", - // "id": "compute.zoneOperations.list", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/operations", - // "response": { - // "$ref": "OperationList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ZoneOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.zones.get": - -type ZonesGetCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified Zone resource. Get a list of available -// zones by making a list() request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/zones/get -func (r *ZonesService) Get(project string, zone string) *ZonesGetCall { - c := &ZonesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ZonesGetCall) Fields(s ...googleapi.Field) *ZonesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ZonesGetCall) IfNoneMatch(entityTag string) *ZonesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ZonesGetCall) Context(ctx context.Context) *ZonesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ZonesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ZonesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.zones.get" call. -// Exactly one of *Zone or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Zone.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *ZonesGetCall) Do(opts ...googleapi.CallOption) (*Zone, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Zone{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified Zone resource. 
Get a list of available zones by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.zones.get", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}", - // "response": { - // "$ref": "Zone" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.zones.list": - -type ZonesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of Zone resources available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/zones/list -func (r *ZonesService) List(project string) *ZonesListCall { - c := &ZonesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter expression -// for filtering listed resources, in the form filter={expression}. Your -// {expression} must be in the format: field_name comparison_string -// literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use filter=name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *ZonesListCall) Filter(filter string) *ZonesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
-// (Default: 500) -func (c *ZonesListCall) MaxResults(maxResults int64) *ZonesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *ZonesListCall) OrderBy(orderBy string) *ZonesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *ZonesListCall) PageToken(pageToken string) *ZonesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ZonesListCall) Fields(s ...googleapi.Field) *ZonesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ZonesListCall) IfNoneMatch(entityTag string) *ZonesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ZonesListCall) Context(ctx context.Context) *ZonesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ZonesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ZonesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.zones.list" call. -// Exactly one of *ZoneList or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *ZoneList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
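// For completeness, a sketch of the manual paging pattern that the MaxResults
// and PageToken docs above describe (the Pages helper further down wraps the
// same loop). svc is assumed as in the earlier sketches.
//
//	token := ""
//	for {
//		list, err := svc.Zones.List("my-project").MaxResults(50).PageToken(token).Do()
//		if err != nil {
//			return err
//		}
//		for _, z := range list.Items {
//			fmt.Println(z.Name, z.Region)
//		}
//		if list.NextPageToken == "" {
//			break
//		}
//		token = list.NextPageToken
//	}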
-func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ZoneList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of Zone resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.zones.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones", - // "response": { - // "$ref": "ZoneList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ZonesListCall) Pages(ctx context.Context, f func(*ZoneList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} diff --git a/vendor/google.golang.org/api/dns/v1/dns-api.json b/vendor/google.golang.org/api/dns/v1/dns-api.json deleted file mode 100644 index 38bdda5bd62..00000000000 --- a/vendor/google.golang.org/api/dns/v1/dns-api.json +++ /dev/null @@ -1,708 +0,0 @@ -{ - "kind": "discovery#restDescription", - "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/RqBsQyB2YZT-ZAkK7pcLByI9SZs\"", - "discoveryVersion": "v1", - "id": "dns:v1", - "name": "dns", - "version": "v1", - "revision": "20161110", - "title": "Google Cloud DNS API", - "description": "Configures and serves authoritative DNS records.", - "ownerDomain": "google.com", - "ownerName": "Google", - "icons": { - "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", - "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" - }, - "documentationLink": "https://developers.google.com/cloud-dns", - "protocol": "rest", - "baseUrl": "https://www.googleapis.com/dns/v1/projects/", - "basePath": "/dns/v1/projects/", - "rootUrl": "https://www.googleapis.com/", - "servicePath": "dns/v1/projects/", - "batchPath": "batch", - "parameters": { - "alt": { - "type": "string", - "description": "Data format for the response.", - "default": "json", - "enum": [ - "json" - ], - "enumDescriptions": [ - "Responses with Content-Type of application/json" - ], - "location": "query" - }, - "fields": { - "type": "string", - "description": "Selector specifying which fields to include in a partial response.", - "location": "query" - }, - "key": { - "type": "string", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.", - "location": "query" - }, - "oauth_token": { - "type": "string", - "description": "OAuth 2.0 token for the current user.", - "location": "query" - }, - "prettyPrint": { - "type": "boolean", - "description": "Returns response with indentations and line breaks.", - "default": "true", - "location": "query" - }, - "quotaUser": { - "type": "string", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", - "location": "query" - }, - "userIp": { - "type": "string", - "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.", - "location": "query" - } - }, - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/cloud-platform.read-only": { - "description": "View your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/ndev.clouddns.readonly": { - "description": "View your DNS records hosted by Google Cloud DNS" - }, - "https://www.googleapis.com/auth/ndev.clouddns.readwrite": { - "description": "View and manage your DNS records hosted by Google Cloud DNS" - } - } - } - }, - "schemas": { - "Change": { - "id": "Change", - "type": "object", - "description": "An atomic update to a collection of ResourceRecordSets.", - "properties": { - "additions": { - "type": "array", - "description": "Which ResourceRecordSets to add?", - "items": { - "$ref": "ResourceRecordSet" - } - }, - "deletions": { - "type": "array", - "description": "Which ResourceRecordSets to remove? Must match existing data exactly.", - "items": { - "$ref": "ResourceRecordSet" - } - }, - "id": { - "type": "string", - "description": "Unique identifier for the resource; defined by the server (output only)." - }, - "kind": { - "type": "string", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#change\".", - "default": "dns#change" - }, - "startTime": { - "type": "string", - "description": "The time that this operation was started by the server (output only). This is in RFC3339 text format." - }, - "status": { - "type": "string", - "description": "Status of the operation (output only).", - "enum": [ - "done", - "pending" - ], - "enumDescriptions": [ - "", - "" - ] - } - } - }, - "ChangesListResponse": { - "id": "ChangesListResponse", - "type": "object", - "description": "The response to a request to enumerate Changes to a ResourceRecordSets collection.", - "properties": { - "changes": { - "type": "array", - "description": "The requested changes.", - "items": { - "$ref": "Change" - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "dns#changesListResponse" - }, - "nextPageToken": { - "type": "string", - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. 
There is no way to retrieve a \"snapshot\" of collections larger than the maximum page size." - } - } - }, - "ManagedZone": { - "id": "ManagedZone", - "type": "object", - "description": "A zone is a subtree of the DNS namespace under one administrative responsibility. A ManagedZone is a resource that represents a DNS zone hosted by the Cloud DNS service.", - "properties": { - "creationTime": { - "type": "string", - "description": "The time that this resource was created on the server. This is in RFC3339 text format. Output only." - }, - "description": { - "type": "string", - "description": "A mutable string of at most 1024 characters associated with this resource for the user's convenience. Has no effect on the managed zone's function." - }, - "dnsName": { - "type": "string", - "description": "The DNS name of this managed zone, for instance \"example.com.\"." - }, - "id": { - "type": "string", - "description": "Unique identifier for the resource; defined by the server (output only)", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZone\".", - "default": "dns#managedZone" - }, - "name": { - "type": "string", - "description": "User assigned name for this resource. Must be unique within the project. The name must be 1-63 characters long, must begin with a letter, end with a letter or digit, and only contain lowercase letters, digits or dashes." - }, - "nameServerSet": { - "type": "string", - "description": "Optionally specifies the NameServerSet for this ManagedZone. A NameServerSet is a set of DNS name servers that all host the same ManagedZones. Most users will leave this field unset." - }, - "nameServers": { - "type": "array", - "description": "Delegate your managed_zone to these virtual name servers; defined by the server (output only)", - "items": { - "type": "string" - } - } - } - }, - "ManagedZonesListResponse": { - "id": "ManagedZonesListResponse", - "type": "object", - "properties": { - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "dns#managedZonesListResponse" - }, - "managedZones": { - "type": "array", - "description": "The managed zone resources.", - "items": { - "$ref": "ManagedZone" - } - }, - "nextPageToken": { - "type": "string", - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size." - } - } - }, - "Project": { - "id": "Project", - "type": "object", - "description": "A project resource. The project is a top level container for resources including Cloud DNS ManagedZones. Projects can be created only in the APIs console.", - "properties": { - "id": { - "type": "string", - "description": "User assigned unique identifier for the resource (output only)." - }, - "kind": { - "type": "string", - "description": "Identifies what kind of resource this is. 
Value: the fixed string \"dns#project\".", - "default": "dns#project" - }, - "number": { - "type": "string", - "description": "Unique numeric identifier for the resource; defined by the server (output only).", - "format": "uint64" - }, - "quota": { - "$ref": "Quota", - "description": "Quotas assigned to this project (output only)." - } - } - }, - "Quota": { - "id": "Quota", - "type": "object", - "description": "Limits associated with a Project.", - "properties": { - "kind": { - "type": "string", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#quota\".", - "default": "dns#quota" - }, - "managedZones": { - "type": "integer", - "description": "Maximum allowed number of managed zones in the project.", - "format": "int32" - }, - "resourceRecordsPerRrset": { - "type": "integer", - "description": "Maximum allowed number of ResourceRecords per ResourceRecordSet.", - "format": "int32" - }, - "rrsetAdditionsPerChange": { - "type": "integer", - "description": "Maximum allowed number of ResourceRecordSets to add per ChangesCreateRequest.", - "format": "int32" - }, - "rrsetDeletionsPerChange": { - "type": "integer", - "description": "Maximum allowed number of ResourceRecordSets to delete per ChangesCreateRequest.", - "format": "int32" - }, - "rrsetsPerManagedZone": { - "type": "integer", - "description": "Maximum allowed number of ResourceRecordSets per zone in the project.", - "format": "int32" - }, - "totalRrdataSizePerChange": { - "type": "integer", - "description": "Maximum allowed size for total rrdata in one ChangesCreateRequest in bytes.", - "format": "int32" - } - } - }, - "ResourceRecordSet": { - "id": "ResourceRecordSet", - "type": "object", - "description": "A unit of data that will be returned by the DNS servers.", - "properties": { - "kind": { - "type": "string", - "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#resourceRecordSet\".", - "default": "dns#resourceRecordSet" - }, - "name": { - "type": "string", - "description": "For example, www.example.com." - }, - "rrdatas": { - "type": "array", - "description": "As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1).", - "items": { - "type": "string" - } - }, - "ttl": { - "type": "integer", - "description": "Number of seconds that this ResourceRecordSet can be cached by resolvers.", - "format": "int32" - }, - "type": { - "type": "string", - "description": "The identifier of a supported record type, for example, A, AAAA, MX, TXT, and so on." - } - } - }, - "ResourceRecordSetsListResponse": { - "id": "ResourceRecordSetsListResponse", - "type": "object", - "properties": { - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "dns#resourceRecordSetsListResponse" - }, - "nextPageToken": { - "type": "string", - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size." 
- }, - "rrsets": { - "type": "array", - "description": "The resource record set resources.", - "items": { - "$ref": "ResourceRecordSet" - } - } - } - } - }, - "resources": { - "changes": { - "methods": { - "create": { - "id": "dns.changes.create", - "path": "{project}/managedZones/{managedZone}/changes", - "httpMethod": "POST", - "description": "Atomically update the ResourceRecordSet collection.", - "parameters": { - "managedZone": { - "type": "string", - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Identifies the project addressed by this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "managedZone" - ], - "request": { - "$ref": "Change" - }, - "response": { - "$ref": "Change" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite" - ] - }, - "get": { - "id": "dns.changes.get", - "path": "{project}/managedZones/{managedZone}/changes/{changeId}", - "httpMethod": "GET", - "description": "Fetch the representation of an existing Change.", - "parameters": { - "changeId": { - "type": "string", - "description": "The identifier of the requested change, from a previous ResourceRecordSetsChangeResponse.", - "required": true, - "location": "path" - }, - "managedZone": { - "type": "string", - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Identifies the project addressed by this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "managedZone", - "changeId" - ], - "response": { - "$ref": "Change" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/ndev.clouddns.readonly", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite" - ] - }, - "list": { - "id": "dns.changes.list", - "path": "{project}/managedZones/{managedZone}/changes", - "httpMethod": "GET", - "description": "Enumerate Changes to a ResourceRecordSet collection.", - "parameters": { - "managedZone": { - "type": "string", - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", - "required": true, - "location": "path" - }, - "maxResults": { - "type": "integer", - "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", - "format": "int32", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Identifies the project addressed by this request.", - "required": true, - "location": "path" - }, - "sortBy": { - "type": "string", - "description": "Sorting criterion. 
The only supported value is change sequence.", - "default": "changeSequence", - "enum": [ - "changeSequence" - ], - "enumDescriptions": [ - "" - ], - "location": "query" - }, - "sortOrder": { - "type": "string", - "description": "Sorting order direction: 'ascending' or 'descending'.", - "location": "query" - } - }, - "parameterOrder": [ - "project", - "managedZone" - ], - "response": { - "$ref": "ChangesListResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/ndev.clouddns.readonly", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite" - ] - } - } - }, - "managedZones": { - "methods": { - "create": { - "id": "dns.managedZones.create", - "path": "{project}/managedZones", - "httpMethod": "POST", - "description": "Create a new ManagedZone.", - "parameters": { - "project": { - "type": "string", - "description": "Identifies the project addressed by this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "ManagedZone" - }, - "response": { - "$ref": "ManagedZone" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite" - ] - }, - "delete": { - "id": "dns.managedZones.delete", - "path": "{project}/managedZones/{managedZone}", - "httpMethod": "DELETE", - "description": "Delete a previously created ManagedZone.", - "parameters": { - "managedZone": { - "type": "string", - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Identifies the project addressed by this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "managedZone" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite" - ] - }, - "get": { - "id": "dns.managedZones.get", - "path": "{project}/managedZones/{managedZone}", - "httpMethod": "GET", - "description": "Fetch the representation of an existing ManagedZone.", - "parameters": { - "managedZone": { - "type": "string", - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Identifies the project addressed by this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "managedZone" - ], - "response": { - "$ref": "ManagedZone" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/ndev.clouddns.readonly", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite" - ] - }, - "list": { - "id": "dns.managedZones.list", - "path": "{project}/managedZones", - "httpMethod": "GET", - "description": "Enumerate ManagedZones that have been created but not yet deleted.", - "parameters": { - "dnsName": { - "type": "string", - "description": "Restricts the list to return only zones with this domain name.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "Optional. Maximum number of results to be returned. 
If unspecified, the server will decide how many results to return.", - "format": "int32", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Identifies the project addressed by this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "ManagedZonesListResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/ndev.clouddns.readonly", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite" - ] - } - } - }, - "projects": { - "methods": { - "get": { - "id": "dns.projects.get", - "path": "{project}", - "httpMethod": "GET", - "description": "Fetch the representation of an existing Project.", - "parameters": { - "project": { - "type": "string", - "description": "Identifies the project addressed by this request.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "Project" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/ndev.clouddns.readonly", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite" - ] - } - } - }, - "resourceRecordSets": { - "methods": { - "list": { - "id": "dns.resourceRecordSets.list", - "path": "{project}/managedZones/{managedZone}/rrsets", - "httpMethod": "GET", - "description": "Enumerate ResourceRecordSets that have been created but not yet deleted.", - "parameters": { - "managedZone": { - "type": "string", - "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", - "required": true, - "location": "path" - }, - "maxResults": { - "type": "integer", - "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", - "format": "int32", - "location": "query" - }, - "name": { - "type": "string", - "description": "Restricts the list to return only records with this fully qualified domain name.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Identifies the project addressed by this request.", - "required": true, - "location": "path" - }, - "type": { - "type": "string", - "description": "Restricts the list to return only records of this type. 
If present, the \"name\" parameter must also be present.", - "location": "query" - } - }, - "parameterOrder": [ - "project", - "managedZone" - ], - "response": { - "$ref": "ResourceRecordSetsListResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/ndev.clouddns.readonly", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite" - ] - } - } - } - } -} diff --git a/vendor/google.golang.org/api/dns/v1/dns-gen.go b/vendor/google.golang.org/api/dns/v1/dns-gen.go deleted file mode 100644 index fa66410d622..00000000000 --- a/vendor/google.golang.org/api/dns/v1/dns-gen.go +++ /dev/null @@ -1,2028 +0,0 @@ -// Package dns provides access to the Google Cloud DNS API. -// -// See https://developers.google.com/cloud-dns -// -// Usage example: -// -// import "google.golang.org/api/dns/v1" -// ... -// dnsService, err := dns.New(oauthHttpClient) -package dns // import "google.golang.org/api/dns/v1" - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - context "golang.org/x/net/context" - ctxhttp "golang.org/x/net/context/ctxhttp" - gensupport "google.golang.org/api/gensupport" - googleapi "google.golang.org/api/googleapi" - "io" - "net/http" - "net/url" - "strconv" - "strings" -) - -// Always reference these packages, just in case the auto-generated code -// below doesn't. -var _ = bytes.NewBuffer -var _ = strconv.Itoa -var _ = fmt.Sprintf -var _ = json.NewDecoder -var _ = io.Copy -var _ = url.Parse -var _ = gensupport.MarshalJSON -var _ = googleapi.Version -var _ = errors.New -var _ = strings.Replace -var _ = context.Canceled -var _ = ctxhttp.Do - -const apiId = "dns:v1" -const apiName = "dns" -const apiVersion = "v1" -const basePath = "https://www.googleapis.com/dns/v1/projects/" - -// OAuth2 scopes used by this API. 
-const ( - // View and manage your data across Google Cloud Platform services - CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" - - // View your data across Google Cloud Platform services - CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only" - - // View your DNS records hosted by Google Cloud DNS - NdevClouddnsReadonlyScope = "https://www.googleapis.com/auth/ndev.clouddns.readonly" - - // View and manage your DNS records hosted by Google Cloud DNS - NdevClouddnsReadwriteScope = "https://www.googleapis.com/auth/ndev.clouddns.readwrite" -) - -func New(client *http.Client) (*Service, error) { - if client == nil { - return nil, errors.New("client is nil") - } - s := &Service{client: client, BasePath: basePath} - s.Changes = NewChangesService(s) - s.ManagedZones = NewManagedZonesService(s) - s.Projects = NewProjectsService(s) - s.ResourceRecordSets = NewResourceRecordSetsService(s) - return s, nil -} - -type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment - - Changes *ChangesService - - ManagedZones *ManagedZonesService - - Projects *ProjectsService - - ResourceRecordSets *ResourceRecordSetsService -} - -func (s *Service) userAgent() string { - if s.UserAgent == "" { - return googleapi.UserAgent - } - return googleapi.UserAgent + " " + s.UserAgent -} - -func NewChangesService(s *Service) *ChangesService { - rs := &ChangesService{s: s} - return rs -} - -type ChangesService struct { - s *Service -} - -func NewManagedZonesService(s *Service) *ManagedZonesService { - rs := &ManagedZonesService{s: s} - return rs -} - -type ManagedZonesService struct { - s *Service -} - -func NewProjectsService(s *Service) *ProjectsService { - rs := &ProjectsService{s: s} - return rs -} - -type ProjectsService struct { - s *Service -} - -func NewResourceRecordSetsService(s *Service) *ResourceRecordSetsService { - rs := &ResourceRecordSetsService{s: s} - return rs -} - -type ResourceRecordSetsService struct { - s *Service -} - -// Change: An atomic update to a collection of ResourceRecordSets. -type Change struct { - // Additions: Which ResourceRecordSets to add? - Additions []*ResourceRecordSet `json:"additions,omitempty"` - - // Deletions: Which ResourceRecordSets to remove? Must match existing - // data exactly. - Deletions []*ResourceRecordSet `json:"deletions,omitempty"` - - // Id: Unique identifier for the resource; defined by the server (output - // only). - Id string `json:"id,omitempty"` - - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#change". - Kind string `json:"kind,omitempty"` - - // StartTime: The time that this operation was started by the server - // (output only). This is in RFC3339 text format. - StartTime string `json:"startTime,omitempty"` - - // Status: Status of the operation (output only). - // - // Possible values: - // "done" - // "pending" - Status string `json:"status,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Additions") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Additions") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Change) MarshalJSON() ([]byte, error) { - type noMethod Change - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ChangesListResponse: The response to a request to enumerate Changes -// to a ResourceRecordSets collection. -type ChangesListResponse struct { - // Changes: The requested changes. - Changes []*Change `json:"changes,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: The presence of this field indicates that there exist - // more results following your last page of results in pagination order. - // To fetch them, make another list request using this value as your - // pagination token. - // - // In this way you can retrieve the complete contents of even very large - // collections one page at a time. However, if the contents of the - // collection change between the first and last paginated list request, - // the set of all elements returned will be an inconsistent view of the - // collection. There is no way to retrieve a "snapshot" of collections - // larger than the maximum page size. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Changes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Changes") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ChangesListResponse) MarshalJSON() ([]byte, error) { - type noMethod ChangesListResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ManagedZone: A zone is a subtree of the DNS namespace under one -// administrative responsibility. A ManagedZone is a resource that -// represents a DNS zone hosted by the Cloud DNS service. -type ManagedZone struct { - // CreationTime: The time that this resource was created on the server. - // This is in RFC3339 text format. Output only. - CreationTime string `json:"creationTime,omitempty"` - - // Description: A mutable string of at most 1024 characters associated - // with this resource for the user's convenience. 
Has no effect on the - // managed zone's function. - Description string `json:"description,omitempty"` - - // DnsName: The DNS name of this managed zone, for instance - // "example.com.". - DnsName string `json:"dnsName,omitempty"` - - // Id: Unique identifier for the resource; defined by the server (output - // only) - Id uint64 `json:"id,omitempty,string"` - - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#managedZone". - Kind string `json:"kind,omitempty"` - - // Name: User assigned name for this resource. Must be unique within the - // project. The name must be 1-63 characters long, must begin with a - // letter, end with a letter or digit, and only contain lowercase - // letters, digits or dashes. - Name string `json:"name,omitempty"` - - // NameServerSet: Optionally specifies the NameServerSet for this - // ManagedZone. A NameServerSet is a set of DNS name servers that all - // host the same ManagedZones. Most users will leave this field unset. - NameServerSet string `json:"nameServerSet,omitempty"` - - // NameServers: Delegate your managed_zone to these virtual name - // servers; defined by the server (output only) - NameServers []string `json:"nameServers,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTime") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTime") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ManagedZone) MarshalJSON() ([]byte, error) { - type noMethod ManagedZone - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ManagedZonesListResponse struct { - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // ManagedZones: The managed zone resources. - ManagedZones []*ManagedZone `json:"managedZones,omitempty"` - - // NextPageToken: The presence of this field indicates that there exist - // more results following your last page of results in pagination order. - // To fetch them, make another list request using this value as your - // page token. - // - // In this way you can retrieve the complete contents of even very large - // collections one page at a time. However, if the contents of the - // collection change between the first and last paginated list request, - // the set of all elements returned will be an inconsistent view of the - // collection. There is no way to retrieve a consistent snapshot of a - // collection larger than the maximum page size. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. 
- googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ManagedZonesListResponse) MarshalJSON() ([]byte, error) { - type noMethod ManagedZonesListResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Project: A project resource. The project is a top level container for -// resources including Cloud DNS ManagedZones. Projects can be created -// only in the APIs console. -type Project struct { - // Id: User assigned unique identifier for the resource (output only). - Id string `json:"id,omitempty"` - - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#project". - Kind string `json:"kind,omitempty"` - - // Number: Unique numeric identifier for the resource; defined by the - // server (output only). - Number uint64 `json:"number,omitempty,string"` - - // Quota: Quotas assigned to this project (output only). - Quota *Quota `json:"quota,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Project) MarshalJSON() ([]byte, error) { - type noMethod Project - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Quota: Limits associated with a Project. -type Quota struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#quota". - Kind string `json:"kind,omitempty"` - - // ManagedZones: Maximum allowed number of managed zones in the project. - ManagedZones int64 `json:"managedZones,omitempty"` - - // ResourceRecordsPerRrset: Maximum allowed number of ResourceRecords - // per ResourceRecordSet. 
- ResourceRecordsPerRrset int64 `json:"resourceRecordsPerRrset,omitempty"` - - // RrsetAdditionsPerChange: Maximum allowed number of ResourceRecordSets - // to add per ChangesCreateRequest. - RrsetAdditionsPerChange int64 `json:"rrsetAdditionsPerChange,omitempty"` - - // RrsetDeletionsPerChange: Maximum allowed number of ResourceRecordSets - // to delete per ChangesCreateRequest. - RrsetDeletionsPerChange int64 `json:"rrsetDeletionsPerChange,omitempty"` - - // RrsetsPerManagedZone: Maximum allowed number of ResourceRecordSets - // per zone in the project. - RrsetsPerManagedZone int64 `json:"rrsetsPerManagedZone,omitempty"` - - // TotalRrdataSizePerChange: Maximum allowed size for total rrdata in - // one ChangesCreateRequest in bytes. - TotalRrdataSizePerChange int64 `json:"totalRrdataSizePerChange,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Quota) MarshalJSON() ([]byte, error) { - type noMethod Quota - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ResourceRecordSet: A unit of data that will be returned by the DNS -// servers. -type ResourceRecordSet struct { - // Kind: Identifies what kind of resource this is. Value: the fixed - // string "dns#resourceRecordSet". - Kind string `json:"kind,omitempty"` - - // Name: For example, www.example.com. - Name string `json:"name,omitempty"` - - // Rrdatas: As defined in RFC 1035 (section 5) and RFC 1034 (section - // 3.6.1). - Rrdatas []string `json:"rrdatas,omitempty"` - - // Ttl: Number of seconds that this ResourceRecordSet can be cached by - // resolvers. - Ttl int64 `json:"ttl,omitempty"` - - // Type: The identifier of a supported record type, for example, A, - // AAAA, MX, TXT, and so on. - Type string `json:"type,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *ResourceRecordSet) MarshalJSON() ([]byte, error) { - type noMethod ResourceRecordSet - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ResourceRecordSetsListResponse struct { - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: The presence of this field indicates that there exist - // more results following your last page of results in pagination order. - // To fetch them, make another list request using this value as your - // pagination token. - // - // In this way you can retrieve the complete contents of even very large - // collections one page at a time. However, if the contents of the - // collection change between the first and last paginated list request, - // the set of all elements returned will be an inconsistent view of the - // collection. There is no way to retrieve a consistent snapshot of a - // collection larger than the maximum page size. - NextPageToken string `json:"nextPageToken,omitempty"` - - // Rrsets: The resource record set resources. - Rrsets []*ResourceRecordSet `json:"rrsets,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ResourceRecordSetsListResponse) MarshalJSON() ([]byte, error) { - type noMethod ResourceRecordSetsListResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// method id "dns.changes.create": - -type ChangesCreateCall struct { - s *Service - project string - managedZone string - change *Change - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Atomically update the ResourceRecordSet collection. -func (r *ChangesService) Create(project string, managedZone string, change *Change) *ChangesCreateCall { - c := &ChangesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.managedZone = managedZone - c.change = change - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ChangesCreateCall) Fields(s ...googleapi.Field) *ChangesCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *ChangesCreateCall) Context(ctx context.Context) *ChangesCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ChangesCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ChangesCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.change) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "managedZone": c.managedZone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "dns.changes.create" call. -// Exactly one of *Change or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Change.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ChangesCreateCall) Do(opts ...googleapi.CallOption) (*Change, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Change{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Atomically update the ResourceRecordSet collection.", - // "httpMethod": "POST", - // "id": "dns.changes.create", - // "parameterOrder": [ - // "project", - // "managedZone" - // ], - // "parameters": { - // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or id.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Identifies the project addressed by this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/managedZones/{managedZone}/changes", - // "request": { - // "$ref": "Change" - // }, - // "response": { - // "$ref": "Change" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" - // ] - // } - -} - -// method id "dns.changes.get": - -type ChangesGetCall struct { - s *Service - project string - managedZone string - changeId string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Fetch the representation of an existing Change. -func (r *ChangesService) Get(project string, managedZone string, changeId string) *ChangesGetCall { - c := &ChangesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.managedZone = managedZone - c.changeId = changeId - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ChangesGetCall) Fields(s ...googleapi.Field) *ChangesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ChangesGetCall) IfNoneMatch(entityTag string) *ChangesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ChangesGetCall) Context(ctx context.Context) *ChangesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ChangesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ChangesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes/{changeId}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "managedZone": c.managedZone, - "changeId": c.changeId, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "dns.changes.get" call. -// Exactly one of *Change or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Change.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ChangesGetCall) Do(opts ...googleapi.CallOption) (*Change, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Change{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Fetch the representation of an existing Change.", - // "httpMethod": "GET", - // "id": "dns.changes.get", - // "parameterOrder": [ - // "project", - // "managedZone", - // "changeId" - // ], - // "parameters": { - // "changeId": { - // "description": "The identifier of the requested change, from a previous ResourceRecordSetsChangeResponse.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Identifies the project addressed by this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/managedZones/{managedZone}/changes/{changeId}", - // "response": { - // "$ref": "Change" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/ndev.clouddns.readonly", - // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" - // ] - // } - -} - -// method id "dns.changes.list": - -type ChangesListCall struct { - s *Service - project string - managedZone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Enumerate Changes to a ResourceRecordSet collection. -func (r *ChangesService) List(project string, managedZone string) *ChangesListCall { - c := &ChangesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.managedZone = managedZone - return c -} - -// MaxResults sets the optional parameter "maxResults": Maximum number -// of results to be returned. If unspecified, the server will decide how -// many results to return. -func (c *ChangesListCall) MaxResults(maxResults int64) *ChangesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": A tag returned by -// a previous list request that was truncated. Use this parameter to -// continue a previous list request. -func (c *ChangesListCall) PageToken(pageToken string) *ChangesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// SortBy sets the optional parameter "sortBy": Sorting criterion. The -// only supported value is change sequence. 
-// -// Possible values: -// "changeSequence" (default) -func (c *ChangesListCall) SortBy(sortBy string) *ChangesListCall { - c.urlParams_.Set("sortBy", sortBy) - return c -} - -// SortOrder sets the optional parameter "sortOrder": Sorting order -// direction: 'ascending' or 'descending'. -func (c *ChangesListCall) SortOrder(sortOrder string) *ChangesListCall { - c.urlParams_.Set("sortOrder", sortOrder) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ChangesListCall) Fields(s ...googleapi.Field) *ChangesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ChangesListCall) IfNoneMatch(entityTag string) *ChangesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ChangesListCall) Context(ctx context.Context) *ChangesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ChangesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "managedZone": c.managedZone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "dns.changes.list" call. -// Exactly one of *ChangesListResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ChangesListResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangesListResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ChangesListResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Enumerate Changes to a ResourceRecordSet collection.", - // "httpMethod": "GET", - // "id": "dns.changes.list", - // "parameterOrder": [ - // "project", - // "managedZone" - // ], - // "parameters": { - // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Identifies the project addressed by this request.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "sortBy": { - // "default": "changeSequence", - // "description": "Sorting criterion. The only supported value is change sequence.", - // "enum": [ - // "changeSequence" - // ], - // "enumDescriptions": [ - // "" - // ], - // "location": "query", - // "type": "string" - // }, - // "sortOrder": { - // "description": "Sorting order direction: 'ascending' or 'descending'.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/managedZones/{managedZone}/changes", - // "response": { - // "$ref": "ChangesListResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/ndev.clouddns.readonly", - // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ChangesListCall) Pages(ctx context.Context, f func(*ChangesListResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "dns.managedZones.create": - -type ManagedZonesCreateCall struct { - s *Service - project string - managedzone *ManagedZone - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Create a new ManagedZone. 
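
For context on the generated Cloud DNS client being dropped from vendor/ here, the sketch below shows how a caller would drive ChangesListCall, with Pages handling the pageToken loop implemented above. It is an illustrative sketch only, not code from this repository; it assumes the google.golang.org/api/dns/v1 import path, Application Default Credentials via golang.org/x/oauth2/google, and placeholder project and zone names.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	dns "google.golang.org/api/dns/v1"
)

func main() {
	ctx := context.Background()

	// DefaultClient picks up Application Default Credentials.
	client, err := google.DefaultClient(ctx, dns.NdevClouddnsReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := dns.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Pages runs the pageToken loop shown in ChangesListCall.Pages above.
	call := svc.Changes.List("my-project", "my-zone").MaxResults(50)
	err = call.Pages(ctx, func(page *dns.ChangesListResponse) error {
		for _, ch := range page.Changes {
			fmt.Println(ch.Id, ch.Status)
		}
		return nil // returning a non-nil error would halt the iteration
	})
	if err != nil {
		log.Fatal(err)
	}
}
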
-func (r *ManagedZonesService) Create(project string, managedzone *ManagedZone) *ManagedZonesCreateCall { - c := &ManagedZonesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.managedzone = managedzone - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ManagedZonesCreateCall) Fields(s ...googleapi.Field) *ManagedZonesCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ManagedZonesCreateCall) Context(ctx context.Context) *ManagedZonesCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ManagedZonesCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ManagedZonesCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedzone) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "dns.managedZones.create" call. -// Exactly one of *ManagedZone or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ManagedZone.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ManagedZonesCreateCall) Do(opts ...googleapi.CallOption) (*ManagedZone, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ManagedZone{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Create a new ManagedZone.", - // "httpMethod": "POST", - // "id": "dns.managedZones.create", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Identifies the project addressed by this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/managedZones", - // "request": { - // "$ref": "ManagedZone" - // }, - // "response": { - // "$ref": "ManagedZone" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" - // ] - // } - -} - -// method id "dns.managedZones.delete": - -type ManagedZonesDeleteCall struct { - s *Service - project string - managedZone string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Delete a previously created ManagedZone. -func (r *ManagedZonesService) Delete(project string, managedZone string) *ManagedZonesDeleteCall { - c := &ManagedZonesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.managedZone = managedZone - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ManagedZonesDeleteCall) Fields(s ...googleapi.Field) *ManagedZonesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ManagedZonesDeleteCall) Context(ctx context.Context) *ManagedZonesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ManagedZonesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ManagedZonesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "managedZone": c.managedZone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "dns.managedZones.delete" call. -func (c *ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Delete a previously created ManagedZone.", - // "httpMethod": "DELETE", - // "id": "dns.managedZones.delete", - // "parameterOrder": [ - // "project", - // "managedZone" - // ], - // "parameters": { - // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Identifies the project addressed by this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/managedZones/{managedZone}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" - // ] - // } - -} - -// method id "dns.managedZones.get": - -type ManagedZonesGetCall struct { - s *Service - project string - managedZone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Fetch the representation of an existing ManagedZone. -func (r *ManagedZonesService) Get(project string, managedZone string) *ManagedZonesGetCall { - c := &ManagedZonesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.managedZone = managedZone - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ManagedZonesGetCall) Fields(s ...googleapi.Field) *ManagedZonesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ManagedZonesGetCall) IfNoneMatch(entityTag string) *ManagedZonesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ManagedZonesGetCall) Context(ctx context.Context) *ManagedZonesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ManagedZonesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ManagedZonesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "managedZone": c.managedZone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "dns.managedZones.get" call. -// Exactly one of *ManagedZone or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ManagedZone.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ManagedZonesGetCall) Do(opts ...googleapi.CallOption) (*ManagedZone, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ManagedZone{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Fetch the representation of an existing ManagedZone.", - // "httpMethod": "GET", - // "id": "dns.managedZones.get", - // "parameterOrder": [ - // "project", - // "managedZone" - // ], - // "parameters": { - // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Identifies the project addressed by this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/managedZones/{managedZone}", - // "response": { - // "$ref": "ManagedZone" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/ndev.clouddns.readonly", - // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" - // ] - // } - -} - -// method id "dns.managedZones.list": - -type ManagedZonesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Enumerate ManagedZones that have been created but not yet -// deleted. -func (r *ManagedZonesService) List(project string) *ManagedZonesListCall { - c := &ManagedZonesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// DnsName sets the optional parameter "dnsName": Restricts the list to -// return only zones with this domain name. -func (c *ManagedZonesListCall) DnsName(dnsName string) *ManagedZonesListCall { - c.urlParams_.Set("dnsName", dnsName) - return c -} - -// MaxResults sets the optional parameter "maxResults": Maximum number -// of results to be returned. If unspecified, the server will decide how -// many results to return. 
-func (c *ManagedZonesListCall) MaxResults(maxResults int64) *ManagedZonesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": A tag returned by -// a previous list request that was truncated. Use this parameter to -// continue a previous list request. -func (c *ManagedZonesListCall) PageToken(pageToken string) *ManagedZonesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ManagedZonesListCall) Fields(s ...googleapi.Field) *ManagedZonesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ManagedZonesListCall) IfNoneMatch(entityTag string) *ManagedZonesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ManagedZonesListCall) Context(ctx context.Context) *ManagedZonesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ManagedZonesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ManagedZonesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "dns.managedZones.list" call. -// Exactly one of *ManagedZonesListResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *ManagedZonesListResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ManagedZonesListCall) Do(opts ...googleapi.CallOption) (*ManagedZonesListResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ManagedZonesListResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Enumerate ManagedZones that have been created but not yet deleted.", - // "httpMethod": "GET", - // "id": "dns.managedZones.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "dnsName": { - // "description": "Restricts the list to return only zones with this domain name.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Identifies the project addressed by this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/managedZones", - // "response": { - // "$ref": "ManagedZonesListResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/ndev.clouddns.readonly", - // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ManagedZonesListCall) Pages(ctx context.Context, f func(*ManagedZonesListResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "dns.projects.get": - -type ProjectsGetCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Fetch the representation of an existing Project. -func (r *ProjectsService) Get(project string) *ProjectsGetCall { - c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "dns.projects.get" call. -// Exactly one of *Project or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Project.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Project{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Fetch the representation of an existing Project.", - // "httpMethod": "GET", - // "id": "dns.projects.get", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Identifies the project addressed by this request.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}", - // "response": { - // "$ref": "Project" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/ndev.clouddns.readonly", - // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" - // ] - // } - -} - -// method id "dns.resourceRecordSets.list": - -type ResourceRecordSetsListCall struct { - s *Service - project string - managedZone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Enumerate ResourceRecordSets that have been created but not yet -// deleted. -func (r *ResourceRecordSetsService) List(project string, managedZone string) *ResourceRecordSetsListCall { - c := &ResourceRecordSetsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.managedZone = managedZone - return c -} - -// MaxResults sets the optional parameter "maxResults": Maximum number -// of results to be returned. If unspecified, the server will decide how -// many results to return. -func (c *ResourceRecordSetsListCall) MaxResults(maxResults int64) *ResourceRecordSetsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// Name sets the optional parameter "name": Restricts the list to return -// only records with this fully qualified domain name. -func (c *ResourceRecordSetsListCall) Name(name string) *ResourceRecordSetsListCall { - c.urlParams_.Set("name", name) - return c -} - -// PageToken sets the optional parameter "pageToken": A tag returned by -// a previous list request that was truncated. Use this parameter to -// continue a previous list request. -func (c *ResourceRecordSetsListCall) PageToken(pageToken string) *ResourceRecordSetsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Type sets the optional parameter "type": Restricts the list to return -// only records of this type. If present, the "name" parameter must also -// be present. -func (c *ResourceRecordSetsListCall) Type(type_ string) *ResourceRecordSetsListCall { - c.urlParams_.Set("type", type_) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
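
Along the same lines, a hedged sketch of ManagedZonesCreateCall and ProjectsGetCall from the consumer side. It reuses the imports and client setup from the previous sketch, and "my-project" plus the zone fields are placeholders, not values from this patch.

func createZone(ctx context.Context, svc *dns.Service) error {
	// ManagedZones.Create posts the zone body that
	// ManagedZonesCreateCall.doRequest serializes above.
	zone := &dns.ManagedZone{
		Name:        "example-zone",
		DnsName:     "example.com.",
		Description: "zone managed by the provisioner",
	}
	created, err := svc.ManagedZones.Create("my-project", zone).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("created zone %q (id %d)\n", created.Name, created.Id)

	// Projects.Get reports the per-project quota (managed zones, rrsets, ...).
	proj, err := svc.Projects.Get("my-project").Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("managed-zone quota:", proj.Quota.ManagedZones)
	return nil
}
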
-func (c *ResourceRecordSetsListCall) Fields(s ...googleapi.Field) *ResourceRecordSetsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ResourceRecordSetsListCall) IfNoneMatch(entityTag string) *ResourceRecordSetsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ResourceRecordSetsListCall) Context(ctx context.Context) *ResourceRecordSetsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ResourceRecordSetsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ResourceRecordSetsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/rrsets") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "managedZone": c.managedZone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "dns.resourceRecordSets.list" call. -// Exactly one of *ResourceRecordSetsListResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ResourceRecordSetsListResponse.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (*ResourceRecordSetsListResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ResourceRecordSetsListResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Enumerate ResourceRecordSets that have been created but not yet deleted.", - // "httpMethod": "GET", - // "id": "dns.resourceRecordSets.list", - // "parameterOrder": [ - // "project", - // "managedZone" - // ], - // "parameters": { - // "managedZone": { - // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "name": { - // "description": "Restricts the list to return only records with this fully qualified domain name.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Identifies the project addressed by this request.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "type": { - // "description": "Restricts the list to return only records of this type. If present, the \"name\" parameter must also be present.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/managedZones/{managedZone}/rrsets", - // "response": { - // "$ref": "ResourceRecordSetsListResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/ndev.clouddns.readonly", - // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
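
And one sketch for ResourceRecordSetsListCall: per the method description above, the "type" filter requires the "name" filter as well. Again illustrative only, reusing the same svc client; the record name is a placeholder.

func listARecords(ctx context.Context, svc *dns.Service) error {
	// "type" must be accompanied by "name", as documented in the parameters above.
	call := svc.ResourceRecordSets.List("my-project", "my-zone").
		Name("www.example.com.").
		Type("A")
	return call.Pages(ctx, func(page *dns.ResourceRecordSetsListResponse) error {
		for _, rr := range page.Rrsets {
			fmt.Println(rr.Name, rr.Type, rr.Ttl, rr.Rrdatas)
		}
		return nil // a non-nil error here stops the iteration
	})
}
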
-func (c *ResourceRecordSetsListCall) Pages(ctx context.Context, f func(*ResourceRecordSetsListResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} diff --git a/vendor/google.golang.org/api/logging/v2beta1/logging-api.json b/vendor/google.golang.org/api/logging/v2beta1/logging-api.json deleted file mode 100644 index 27bf67d13c9..00000000000 --- a/vendor/google.golang.org/api/logging/v2beta1/logging-api.json +++ /dev/null @@ -1,1526 +0,0 @@ -{ - "title": "Stackdriver Logging API", - "ownerName": "Google", - "resources": { - "projects": { - "resources": { - "metrics": { - "methods": { - "delete": { - "id": "logging.projects.metrics.delete", - "path": "v2beta1/{+metricName}", - "description": "Deletes a logs-based metric.", - "httpMethod": "DELETE", - "parameterOrder": [ - "metricName" - ], - "response": { - "$ref": "Empty" - }, - "parameters": { - "metricName": { - "description": "The resource name of the metric to delete:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/metrics/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.write" - ], - "flatPath": "v2beta1/projects/{projectsId}/metrics/{metricsId}" - }, - "list": { - "description": "Lists logs-based metrics.", - "httpMethod": "GET", - "response": { - "$ref": "ListLogMetricsResponse" - }, - "parameterOrder": [ - "parent" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "parameters": { - "parent": { - "description": "Required. The name of the project containing the metrics:\n\"projects/[PROJECT_ID]\"\n", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" - }, - "pageToken": { - "location": "query", - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string" - }, - "pageSize": { - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. 
The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32", - "type": "integer", - "location": "query" - } - }, - "flatPath": "v2beta1/projects/{projectsId}/metrics", - "id": "logging.projects.metrics.list", - "path": "v2beta1/{+parent}/metrics" - }, - "get": { - "description": "Gets a logs-based metric.", - "response": { - "$ref": "LogMetric" - }, - "parameterOrder": [ - "metricName" - ], - "httpMethod": "GET", - "parameters": { - "metricName": { - "pattern": "^projects/[^/]+/metrics/[^/]+$", - "location": "path", - "description": "The resource name of the desired metric:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", - "required": true, - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "flatPath": "v2beta1/projects/{projectsId}/metrics/{metricsId}", - "path": "v2beta1/{+metricName}", - "id": "logging.projects.metrics.get" - }, - "update": { - "id": "logging.projects.metrics.update", - "path": "v2beta1/{+metricName}", - "request": { - "$ref": "LogMetric" - }, - "description": "Creates or updates a logs-based metric.", - "httpMethod": "PUT", - "parameterOrder": [ - "metricName" - ], - "response": { - "$ref": "LogMetric" - }, - "parameters": { - "metricName": { - "pattern": "^projects/[^/]+/metrics/[^/]+$", - "location": "path", - "description": "The resource name of the metric to update:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\nThe updated metric must be provided in the request and it's name field must be the same as [METRIC_ID] If the metric does not exist in [PROJECT_ID], then a new metric is created.", - "required": true, - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.write" - ], - "flatPath": "v2beta1/projects/{projectsId}/metrics/{metricsId}" - }, - "create": { - "request": { - "$ref": "LogMetric" - }, - "description": "Creates a logs-based metric.", - "httpMethod": "POST", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "LogMetric" - }, - "parameters": { - "parent": { - "description": "The resource name of the project in which to create the metric:\n\"projects/[PROJECT_ID]\"\nThe new metric must be provided in the request.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.write" - ], - "flatPath": "v2beta1/projects/{projectsId}/metrics", - "id": "logging.projects.metrics.create", - "path": "v2beta1/{+parent}/metrics" - } - } - }, - "logs": { - "methods": { - "delete": { - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "logName" - ], - "httpMethod": "DELETE", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "parameters": { - "logName": { - "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. 
For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/logs/[^/]+$", - "location": "path" - } - }, - "flatPath": "v2beta1/projects/{projectsId}/logs/{logsId}", - "path": "v2beta1/{+logName}", - "id": "logging.projects.logs.delete", - "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted." - }, - "list": { - "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "ListLogsResponse" - }, - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "parameters": { - "parent": { - "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" - }, - "pageToken": { - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string", - "location": "query" - }, - "pageSize": { - "location": "query", - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32", - "type": "integer" - } - }, - "flatPath": "v2beta1/projects/{projectsId}/logs", - "path": "v2beta1/{+parent}/logs", - "id": "logging.projects.logs.list" - } - } - }, - "sinks": { - "methods": { - "create": { - "request": { - "$ref": "LogSink" - }, - "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the current time is outside the sink's start and end times or the sink's writer_identity is not permitted to write to the destination. A sink can export log entries only from the resource owning the sink.", - "httpMethod": "POST", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "LogSink" - }, - "parameters": { - "parent": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", - "required": true, - "type": "string" - }, - "uniqueWriterIdentity": { - "location": "query", - "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. 
If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Stackdriver Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", - "type": "boolean" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "flatPath": "v2beta1/projects/{projectsId}/sinks", - "id": "logging.projects.sinks.create", - "path": "v2beta1/{+parent}/sinks" - }, - "delete": { - "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted.", - "parameterOrder": [ - "sinkName" - ], - "response": { - "$ref": "Empty" - }, - "httpMethod": "DELETE", - "parameters": { - "sinkName": { - "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/sinks/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}", - "path": "v2beta1/{+sinkName}", - "id": "logging.projects.sinks.delete" - }, - "list": { - "response": { - "$ref": "ListSinksResponse" - }, - "parameterOrder": [ - "parent" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "parameters": { - "parent": { - "description": "Required. The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" - }, - "pageToken": { - "location": "query", - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string" - }, - "pageSize": { - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32", - "type": "integer", - "location": "query" - } - }, - "flatPath": "v2beta1/projects/{projectsId}/sinks", - "path": "v2beta1/{+parent}/sinks", - "id": "logging.projects.sinks.list", - "description": "Lists sinks." 
- }, - "get": { - "description": "Gets a sink.", - "httpMethod": "GET", - "response": { - "$ref": "LogSink" - }, - "parameterOrder": [ - "sinkName" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "parameters": { - "sinkName": { - "description": "Required. The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/sinks/[^/]+$", - "location": "path" - } - }, - "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}", - "id": "logging.projects.sinks.get", - "path": "v2beta1/{+sinkName}" - }, - "update": { - "httpMethod": "PUT", - "parameterOrder": [ - "sinkName" - ], - "response": { - "$ref": "LogSink" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "parameters": { - "sinkName": { - "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/sinks/[^/]+$", - "location": "path" - }, - "uniqueWriterIdentity": { - "location": "query", - "description": "Optional. See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is false.", - "type": "boolean" - } - }, - "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}", - "id": "logging.projects.sinks.update", - "path": "v2beta1/{+sinkName}", - "description": "Updates a sink. If the named sink doesn't exist, then this method is identical to sinks.create. If the named sink does exist, then this method replaces the following fields in the existing sink with values from the new sink: destination, filter, output_version_format, start_time, and end_time. The updated filter might also have a new writer_identity; see the unique_writer_identity field.", - "request": { - "$ref": "LogSink" - } - } - } - } - } - }, - "billingAccounts": { - "resources": { - "logs": { - "methods": { - "delete": { - "parameters": { - "logName": { - "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". 
For more information about log names, see LogEntry.", - "required": true, - "type": "string", - "pattern": "^billingAccounts/[^/]+/logs/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "flatPath": "v2beta1/billingAccounts/{billingAccountsId}/logs/{logsId}", - "id": "logging.billingAccounts.logs.delete", - "path": "v2beta1/{+logName}", - "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.", - "httpMethod": "DELETE", - "parameterOrder": [ - "logName" - ], - "response": { - "$ref": "Empty" - } - }, - "list": { - "id": "logging.billingAccounts.logs.list", - "path": "v2beta1/{+parent}/logs", - "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", - "httpMethod": "GET", - "response": { - "$ref": "ListLogsResponse" - }, - "parameterOrder": [ - "parent" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "parameters": { - "pageToken": { - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string", - "location": "query" - }, - "pageSize": { - "location": "query", - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32", - "type": "integer" - }, - "parent": { - "pattern": "^billingAccounts/[^/]+$", - "location": "path", - "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", - "required": true, - "type": "string" - } - }, - "flatPath": "v2beta1/billingAccounts/{billingAccountsId}/logs" - } - } - } - } - }, - "monitoredResourceDescriptors": { - "methods": { - "list": { - "path": "v2beta1/monitoredResourceDescriptors", - "id": "logging.monitoredResourceDescriptors.list", - "description": "Lists the descriptors for monitored resource types used by Stackdriver Logging.", - "parameterOrder": [], - "response": { - "$ref": "ListMonitoredResourceDescriptorsResponse" - }, - "httpMethod": "GET", - "parameters": { - "pageToken": { - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string", - "location": "query" - }, - "pageSize": { - "location": "query", - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. 
The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32", - "type": "integer" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "flatPath": "v2beta1/monitoredResourceDescriptors" - } - } - }, - "organizations": { - "resources": { - "logs": { - "methods": { - "delete": { - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "logName" - ], - "httpMethod": "DELETE", - "parameters": { - "logName": { - "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", - "required": true, - "type": "string", - "pattern": "^organizations/[^/]+/logs/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin" - ], - "flatPath": "v2beta1/organizations/{organizationsId}/logs/{logsId}", - "path": "v2beta1/{+logName}", - "id": "logging.organizations.logs.delete", - "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted." - }, - "list": { - "httpMethod": "GET", - "response": { - "$ref": "ListLogsResponse" - }, - "parameterOrder": [ - "parent" - ], - "parameters": { - "pageToken": { - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string", - "location": "query" - }, - "pageSize": { - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - "format": "int32", - "type": "integer", - "location": "query" - }, - "parent": { - "pattern": "^organizations/[^/]+$", - "location": "path", - "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", - "required": true, - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "flatPath": "v2beta1/organizations/{organizationsId}/logs", - "id": "logging.organizations.logs.list", - "path": "v2beta1/{+parent}/logs", - "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed." 
- } - } - } - } - }, - "entries": { - "methods": { - "list": { - "response": { - "$ref": "ListLogEntriesResponse" - }, - "parameterOrder": [], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read" - ], - "parameters": {}, - "flatPath": "v2beta1/entries:list", - "path": "v2beta1/entries:list", - "id": "logging.entries.list", - "description": "Lists log entries. Use this method to retrieve log entries from Stackdriver Logging. For ways to export log entries, see Exporting Logs.", - "request": { - "$ref": "ListLogEntriesRequest" - } - }, - "write": { - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.write" - ], - "parameters": {}, - "flatPath": "v2beta1/entries:write", - "id": "logging.entries.write", - "path": "v2beta1/entries:write", - "description": "Writes log entries to Stackdriver Logging.", - "request": { - "$ref": "WriteLogEntriesRequest" - }, - "httpMethod": "POST", - "parameterOrder": [], - "response": { - "$ref": "WriteLogEntriesResponse" - } - } - } - } - }, - "parameters": { - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "type": "boolean", - "default": "true", - "location": "query" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" - }, - "callback": { - "location": "query", - "description": "JSONP", - "type": "string" - }, - "$.xgafv": { - "description": "V1 error format.", - "type": "string", - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query", - "enum": [ - "1", - "2" - ] - }, - "alt": { - "enum": [ - "json", - "media", - "proto" - ], - "type": "string", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query", - "description": "Data format for response.", - "default": "json" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "location": "query", - "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string" - }, - "pp": { - "description": "Pretty-print response.", - "type": "boolean", - "default": "true", - "location": "query" - }, - "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" - }, - "oauth_token": { - "location": "query", - "description": "OAuth 2.0 token for the current user.", - "type": "string" - } - }, - "version": "v2beta1", - "baseUrl": "https://logging.googleapis.com/", - "servicePath": "", - "description": "Writes log entries and manages your Stackdriver Logging configuration.", - "kind": "discovery#restDescription", - "basePath": "", - "documentationLink": "https://cloud.google.com/logging/docs/", - "revision": "20170410", - "id": "logging:v2beta1", - "discoveryVersion": "v1", - "version_module": "True", - "schemas": { - "MonitoredResourceDescriptor": { - "id": "MonitoredResourceDescriptor", - "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of \"gce_instance\" and specifies the use of the labels \"instance_id\" and \"zone\" to identify particular VM instances.Different APIs can support different monitored resource types. APIs generally provide a list method that returns the monitored resource descriptors used by the API.", - "type": "object", - "properties": { - "name": { - "description": "Optional. The resource name of the monitored resource descriptor: \"projects/{project_id}/monitoredResourceDescriptors/{type}\" where {type} is the value of the type field in this object and {project_id} is a project ID that provides API-specific context for accessing the type. APIs that do not use project information can use the resource name format \"monitoredResourceDescriptors/{type}\".", - "type": "string" - }, - "displayName": { - "description": "Optional. A concise name for the monitored resource type that might be displayed in user interfaces. It should be a Title Cased Noun Phrase, without any article or other determiners. For example, \"Google Cloud SQL Database\".", - "type": "string" - }, - "description": { - "description": "Optional. A detailed description of the monitored resource type that might be used in documentation.", - "type": "string" - }, - "type": { - "description": "Required. The monitored resource type. For example, the type \"cloudsql_database\" represents databases in Google Cloud SQL. The maximum length of this value is 256 characters.", - "type": "string" - }, - "labels": { - "description": "Required. A set of labels used to describe instances of this monitored resource type. For example, an individual Google Cloud SQL database is identified by values for the labels \"database_id\" and \"zone\".", - "type": "array", - "items": { - "$ref": "LabelDescriptor" - } - } - } - }, - "LogEntrySourceLocation": { - "description": "Additional information about the source code location that produced the log entry.", - "type": "object", - "properties": { - "file": { - "description": "Optional. Source file name. Depending on the runtime environment, this might be a simple name or a fully-qualified name.", - "type": "string" - }, - "function": { - "description": "Optional. Human-readable name of the function or method being invoked, with optional context such as the class or package name. 
This information may be used in contexts such as the logs viewer, where a file and line number are less meaningful. The format can vary by language. For example: qual.if.ied.Class.method (Java), dir/package.func (Go), function (Python).", - "type": "string" - }, - "line": { - "description": "Optional. Line within the source file. 1-based; 0 indicates no line number available.", - "format": "int64", - "type": "string" - } - }, - "id": "LogEntrySourceLocation" - }, - "ListLogEntriesResponse": { - "description": "Result returned from ListLogEntries.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "If there might be more results than those appearing in this response, then nextPageToken is included. To get the next set of results, call this method again using the value of nextPageToken as pageToken.If a value for next_page_token appears and the entries field is empty, it means that the search found no log entries so far but it did not have time to search all the possible log entries. Retry the method with this value for page_token to continue the search. Alternatively, consider speeding up the search by changing your filter to specify a single log name or resource type, or to narrow the time range of the search.", - "type": "string" - }, - "entries": { - "description": "A list of log entries.", - "type": "array", - "items": { - "$ref": "LogEntry" - } - } - }, - "id": "ListLogEntriesResponse" - }, - "LogLine": { - "id": "LogLine", - "description": "Application log line emitted while processing a request.", - "type": "object", - "properties": { - "sourceLocation": { - "$ref": "SourceLocation", - "description": "Where in the source code this log message was written." - }, - "time": { - "description": "Approximate time when this log entry was made.", - "format": "google-datetime", - "type": "string" - }, - "severity": { - "description": "Severity of this log entry.", - "type": "string", - "enumDescriptions": [ - "(0) The log entry has no assigned severity level.", - "(100) Debug or trace information.", - "(200) Routine information, such as ongoing status or performance.", - "(300) Normal but significant events, such as start up, shut down, or a configuration change.", - "(400) Warning events might cause problems.", - "(500) Error events are likely to cause problems.", - "(600) Critical events cause more severe problems or outages.", - "(700) A person must take an action immediately.", - "(800) One or more systems are unusable." - ], - "enum": [ - "DEFAULT", - "DEBUG", - "INFO", - "NOTICE", - "WARNING", - "ERROR", - "CRITICAL", - "ALERT", - "EMERGENCY" - ] - }, - "logMessage": { - "description": "App-provided log message.", - "type": "string" - } - } - }, - "ListLogMetricsResponse": { - "id": "ListLogMetricsResponse", - "description": "Result returned from ListLogMetrics.", - "type": "object", - "properties": { - "metrics": { - "description": "A list of logs-based metrics.", - "type": "array", - "items": { - "$ref": "LogMetric" - } - }, - "nextPageToken": { - "description": "If there might be more results than appear in this response, then nextPageToken is included. To get the next set of results, call this method again using the value of nextPageToken as pageToken.", - "type": "string" - } - } - }, - "Empty": { - "id": "Empty", - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. 
For instance:\nservice Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n}\nThe JSON representation for Empty is empty JSON object {}.", - "type": "object", - "properties": {} - }, - "LogEntry": { - "id": "LogEntry", - "description": "An individual entry in a log.", - "type": "object", - "properties": { - "timestamp": { - "description": "Optional. The time the event described by the log entry occurred. If omitted in a new log entry, Stackdriver Logging will insert the time the log entry is received. Stackdriver Logging might reject log entries whose time stamps are more than a couple of hours in the future. Log entries with time stamps in the past are accepted.", - "format": "google-datetime", - "type": "string" - }, - "logName": { - "description": "Required. The resource name of the log to which this log entry belongs:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded within log_name. Example: \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". [LOG_ID] must be less than 512 characters long and can only include the following characters: upper and lower case alphanumeric characters, forward-slash, underscore, hyphen, and period.For backward compatibility, if log_name begins with a forward-slash, such as /projects/..., then the log entry is ingested as usual but the forward-slash is removed. Listing the log entry will not show the leading slash and filtering for a log name with a leading slash will never return any results.", - "type": "string" - }, - "resource": { - "description": "Required. The monitored resource associated with this log entry. Example: a log entry that reports a database error would be associated with the monitored resource designating the particular database that reported the error.", - "$ref": "MonitoredResource" - }, - "httpRequest": { - "description": "Optional. Information about the HTTP request associated with this log entry, if applicable.", - "$ref": "HttpRequest" - }, - "jsonPayload": { - "additionalProperties": { - "description": "Properties of the object.", - "type": "any" - }, - "description": "The log entry payload, represented as a structure that is expressed as a JSON object.", - "type": "object" - }, - "insertId": { - "description": "Optional. A unique identifier for the log entry. If you provide a value, then Stackdriver Logging considers other log entries in the same project, with the same timestamp, and with the same insert_id to be duplicates which can be removed. If omitted in new log entries, then Stackdriver Logging will insert its own unique identifier. The insert_id is used to order log entries that have the same timestamp value.", - "type": "string" - }, - "operation": { - "description": "Optional. Information about an operation associated with the log entry, if applicable.", - "$ref": "LogEntryOperation" - }, - "textPayload": { - "description": "The log entry payload, represented as a Unicode string (UTF-8).", - "type": "string" - }, - "protoPayload": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "The log entry payload, represented as a protocol buffer. Some Google Cloud Platform services use this field for their log entry payloads.", - "type": "object" - }, - "labels": { - "description": "Optional. 
A set of user-defined (key, value) data that provides additional information about the log entry.", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "trace": { - "description": "Optional. Resource name of the trace associated with the log entry, if any. If it contains a relative resource name, the name is assumed to be relative to //tracing.googleapis.com. Example: projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824", - "type": "string" - }, - "severity": { - "enumDescriptions": [ - "(0) The log entry has no assigned severity level.", - "(100) Debug or trace information.", - "(200) Routine information, such as ongoing status or performance.", - "(300) Normal but significant events, such as start up, shut down, or a configuration change.", - "(400) Warning events might cause problems.", - "(500) Error events are likely to cause problems.", - "(600) Critical events cause more severe problems or outages.", - "(700) A person must take an action immediately.", - "(800) One or more systems are unusable." - ], - "enum": [ - "DEFAULT", - "DEBUG", - "INFO", - "NOTICE", - "WARNING", - "ERROR", - "CRITICAL", - "ALERT", - "EMERGENCY" - ], - "description": "Optional. The severity of the log entry. The default value is LogSeverity.DEFAULT.", - "type": "string" - }, - "sourceLocation": { - "$ref": "LogEntrySourceLocation", - "description": "Optional. Source code location information associated with the log entry, if any." - } - } - }, - "SourceLocation": { - "description": "Specifies a location in a source code file.", - "type": "object", - "properties": { - "file": { - "description": "Source file name. Depending on the runtime environment, this might be a simple name or a fully-qualified name.", - "type": "string" - }, - "functionName": { - "description": "Human-readable name of the function or method being invoked, with optional context such as the class or package name. This information is used in contexts such as the logs viewer, where a file and line number are less meaningful. The format can vary by language. For example: qual.if.ied.Class.method (Java), dir/package.func (Go), function (Python).", - "type": "string" - }, - "line": { - "description": "Line within the source file.", - "format": "int64", - "type": "string" - } - }, - "id": "SourceLocation" - }, - "ListLogEntriesRequest": { - "description": "The parameters to ListLogEntries.", - "type": "object", - "properties": { - "orderBy": { - "description": "Optional. How the results should be sorted. Presently, the only permitted values are \"timestamp asc\" (default) and \"timestamp desc\". The first option returns entries in order of increasing values of LogEntry.timestamp (oldest first), and the second option returns entries in order of decreasing timestamps (newest first). Entries with equal timestamps are returned in order of their insert_id values.", - "type": "string" - }, - "resourceNames": { - "description": "Required. Names of one or more parent resources from which to retrieve log entries:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nProjects listed in the project_ids field are added to this list.", - "type": "array", - "items": { - "type": "string" - } - }, - "projectIds": { - "description": "Deprecated. Use resource_names instead. One or more project identifiers or project numbers from which to retrieve log entries. Example: \"my-project-1A\". 
If present, these project identifiers are converted to resource name format and added to the list of resources in resource_names.", - "type": "array", - "items": { - "type": "string" - } - }, - "filter": { - "description": "Optional. A filter that chooses which log entries to return. See Advanced Logs Filters. Only log entries that match the filter are returned. An empty filter matches all log entries in the resources listed in resource_names. Referencing a parent resource that is not listed in resource_names will cause the filter to return no results. The maximum length of the filter is 20000 characters.", - "type": "string" - }, - "pageToken": { - "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. page_token must be the value of next_page_token from the previous response. The values of other method parameters should be identical to those in the previous call.", - "type": "string" - }, - "pageSize": { - "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of next_page_token in the response indicates that more results might be available.", - "format": "int32", - "type": "integer" - } - }, - "id": "ListLogEntriesRequest" - }, - "RequestLog": { - "description": "Complete log information about a single HTTP request to an App Engine application.", - "type": "object", - "properties": { - "endTime": { - "description": "Time when the request finished.", - "format": "google-datetime", - "type": "string" - }, - "userAgent": { - "description": "User agent that made the request.", - "type": "string" - }, - "wasLoadingRequest": { - "description": "Whether this was a loading request for the instance.", - "type": "boolean" - }, - "sourceReference": { - "description": "Source code for the application that handled this request. There can be more than one source reference per deployed application if source code is distributed among multiple repositories.", - "type": "array", - "items": { - "$ref": "SourceReference" - } - }, - "responseSize": { - "description": "Size in bytes sent back to client by request.", - "format": "int64", - "type": "string" - }, - "traceId": { - "description": "Stackdriver Trace identifier for this request.", - "type": "string" - }, - "line": { - "description": "A list of log lines emitted by the application while serving this request.", - "type": "array", - "items": { - "$ref": "LogLine" - } - }, - "referrer": { - "description": "Referrer URL of request.", - "type": "string" - }, - "taskQueueName": { - "description": "Queue name of the request, in the case of an offline request.", - "type": "string" - }, - "requestId": { - "description": "Globally unique identifier for a request, which is based on the request start time. Request IDs for requests which started later will compare greater as strings than those for requests which started earlier.", - "type": "string" - }, - "nickname": { - "description": "The logged-in user who made the request.Most likely, this is the part of the user's email before the @ sign. The field value is the same for different requests from the same user, but different users can have similar names. This information is also available to the application via the App Engine Users API.This field will be populated starting with App Engine 1.9.21.", - "type": "string" - }, - "status": { - "description": "HTTP response status code. 
Example: 200, 404.", - "format": "int32", - "type": "integer" - }, - "resource": { - "description": "Contains the path and query portion of the URL that was requested. For example, if the URL was \"http://example.com/app?name=val\", the resource would be \"/app?name=val\". The fragment identifier, which is identified by the # character, is not included.", - "type": "string" - }, - "pendingTime": { - "description": "Time this request spent in the pending request queue.", - "format": "google-duration", - "type": "string" - }, - "taskName": { - "description": "Task name of the request, in the case of an offline request.", - "type": "string" - }, - "urlMapEntry": { - "description": "File or class that handled the request.", - "type": "string" - }, - "instanceIndex": { - "description": "If the instance processing this request belongs to a manually scaled module, then this is the 0-based index of the instance. Otherwise, this value is -1.", - "format": "int32", - "type": "integer" - }, - "host": { - "description": "Internet host and port number of the resource being requested.", - "type": "string" - }, - "finished": { - "description": "Whether this request is finished or active.", - "type": "boolean" - }, - "httpVersion": { - "description": "HTTP version of request. Example: \"HTTP/1.1\".", - "type": "string" - }, - "startTime": { - "description": "Time when the request started.", - "format": "google-datetime", - "type": "string" - }, - "latency": { - "description": "Latency of the request.", - "format": "google-duration", - "type": "string" - }, - "ip": { - "description": "Origin IP address.", - "type": "string" - }, - "appId": { - "description": "Application that handled this request.", - "type": "string" - }, - "appEngineRelease": { - "description": "App Engine release version.", - "type": "string" - }, - "method": { - "description": "Request method. Example: \"GET\", \"HEAD\", \"PUT\", \"POST\", \"DELETE\".", - "type": "string" - }, - "cost": { - "description": "An indication of the relative cost of serving this request.", - "format": "double", - "type": "number" - }, - "instanceId": { - "description": "An identifier for the instance that handled the request.", - "type": "string" - }, - "megaCycles": { - "description": "Number of CPU megacycles used to process request.", - "format": "int64", - "type": "string" - }, - "first": { - "description": "Whether this is the first RequestLog entry for this request. If an active request has several RequestLog entries written to Stackdriver Logging, then this field will be set for one of them.", - "type": "boolean" - }, - "versionId": { - "description": "Version of the application that handled this request.", - "type": "string" - }, - "moduleId": { - "description": "Module of the application that handled this request.", - "type": "string" - } - }, - "id": "RequestLog" - }, - "ListMonitoredResourceDescriptorsResponse": { - "id": "ListMonitoredResourceDescriptorsResponse", - "description": "Result returned from ListMonitoredResourceDescriptors.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "If there might be more results than those appearing in this response, then nextPageToken is included. 
To get the next set of results, call this method again using the value of nextPageToken as pageToken.", - "type": "string" - }, - "resourceDescriptors": { - "description": "A list of resource descriptors.", - "type": "array", - "items": { - "$ref": "MonitoredResourceDescriptor" - } - } - } - }, - "SourceReference": { - "id": "SourceReference", - "description": "A reference to a particular snapshot of the source tree used to build and deploy an application.", - "type": "object", - "properties": { - "repository": { - "description": "Optional. A URI string identifying the repository. Example: \"https://github.com/GoogleCloudPlatform/kubernetes.git\"", - "type": "string" - }, - "revisionId": { - "description": "The canonical and persistent identifier of the deployed revision. Example (git): \"0035781c50ec7aa23385dc841529ce8a4b70db1b\"", - "type": "string" - } - } - }, - "LogEntryOperation": { - "description": "Additional information about a potentially long-running operation with which a log entry is associated.", - "type": "object", - "properties": { - "last": { - "description": "Optional. Set this to True if this is the last log entry in the operation.", - "type": "boolean" - }, - "id": { - "description": "Optional. An arbitrary operation identifier. Log entries with the same identifier are assumed to be part of the same operation.", - "type": "string" - }, - "producer": { - "description": "Optional. An arbitrary producer identifier. The combination of id and producer must be globally unique. Examples for producer: \"MyDivision.MyBigCompany.com\", \"github.com/MyProject/MyApplication\".", - "type": "string" - }, - "first": { - "description": "Optional. Set this to True if this is the first log entry in the operation.", - "type": "boolean" - } - }, - "id": "LogEntryOperation" - }, - "LogMetric": { - "description": "Describes a logs-based metric. The value of the metric is the number of log entries that match a logs filter in a given time interval.", - "type": "object", - "properties": { - "version": { - "enumDescriptions": [ - "Stackdriver Logging API v2.", - "Stackdriver Logging API v1." - ], - "enum": [ - "V2", - "V1" - ], - "description": "Output only. The API version that created or updated this metric. The version also dictates the syntax of the filter expression. When a value for this field is missing, the default value of V2 should be assumed.", - "type": "string" - }, - "filter": { - "description": "Required. An advanced logs filter which is used to match log entries. Example:\n\"resource.type=gae_app AND severity\u003e=ERROR\"\nThe maximum length of the filter is 20000 characters.", - "type": "string" - }, - "name": { - "description": "Required. The client-assigned metric identifier. Examples: \"error_count\", \"nginx/requests\".Metric identifiers are limited to 100 characters and can include only the following characters: A-Z, a-z, 0-9, and the special characters _-.,+!*',()%/. The forward-slash character (/) denotes a hierarchy of name pieces, and it cannot be the first character of the name.The metric identifier in this field must not be URL-encoded (https://en.wikipedia.org/wiki/Percent-encoding). However, when the metric identifier appears as the [METRIC_ID] part of a metric_name API parameter, then the metric identifier must be URL-encoded. Example: \"projects/my-project/metrics/nginx%2Frequests\".", - "type": "string" - }, - "description": { - "description": "Optional. 
A description of this metric, which is used in documentation.", - "type": "string" - } - }, - "id": "LogMetric" - }, - "WriteLogEntriesResponse": { - "description": "Result returned from WriteLogEntries. empty", - "type": "object", - "properties": {}, - "id": "WriteLogEntriesResponse" - }, - "MonitoredResource": { - "description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. Examples include virtual machine instances, databases, and storage devices such as disks. The type field identifies a MonitoredResourceDescriptor object that describes the resource's schema. Information in the labels field identifies the actual resource and its attributes according to the schema. For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for \"gce_instance\" has labels \"instance_id\" and \"zone\":\n{ \"type\": \"gce_instance\",\n \"labels\": { \"instance_id\": \"12345678901234\",\n \"zone\": \"us-central1-a\" }}\n", - "type": "object", - "properties": { - "labels": { - "description": "Required. Values for all of the labels listed in the associated monitored resource descriptor. For example, Cloud SQL databases use the labels \"database_id\" and \"zone\".", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "type": { - "description": "Required. The monitored resource type. This field must match the type field of a MonitoredResourceDescriptor object. For example, the type of a Cloud SQL database is \"cloudsql_database\".", - "type": "string" - } - }, - "id": "MonitoredResource" - }, - "LogSink": { - "id": "LogSink", - "description": "Describes a sink used to export log entries to one of the following destinations in any project: a Cloud Storage bucket, a BigQuery dataset, or a Cloud Pub/Sub topic. A logs filter controls which log entries are exported. The sink must be created within a project, organization, billing account, or folder.", - "type": "object", - "properties": { - "filter": { - "description": "Optional. An advanced logs filter. The only exported log entries are those that are in the resource owning the sink and that match the filter. The filter must use the log entry format specified by the output_version_format parameter. For example, in the v2 format:\nlogName=\"projects/[PROJECT_ID]/logs/[LOG_ID]\" AND severity\u003e=ERROR\n", - "type": "string" - }, - "destination": { - "description": "Required. The export destination:\n\"storage.googleapis.com/[GCS_BUCKET]\"\n\"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]\"\n\"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]\"\nThe sink's writer_identity, set when the sink is created, must have permission to write to the destination or else the log entries are not exported. For more information, see Exporting Logs With Sinks.", - "type": "string" - }, - "endTime": { - "description": "Optional. The time at which this sink will stop exporting log entries. Log entries are exported only if their timestamp is earlier than the end time. If this field is not supplied, there is no end time. If both a start time and an end time are provided, then the end time must be later than the start time.", - "format": "google-datetime", - "type": "string" - }, - "startTime": { - "description": "Optional. The time at which this sink will begin exporting log entries. Log entries are exported only if their timestamp is not earlier than the start time. 
The default value of this field is the time the sink is created or updated.", - "format": "google-datetime", - "type": "string" - }, - "writerIdentity": { - "description": "Output only. An IAM identity—a service account or group—under which Stackdriver Logging writes the exported log entries to the sink's destination. This field is set by sinks.create and sinks.update, based on the setting of unique_writer_identity in those methods.Until you grant this identity write-access to the destination, log entry exports from this sink will fail. For more information, see Granting access for a resource. Consult the destination service's documentation to determine the appropriate IAM roles to assign to the identity.", - "type": "string" - }, - "outputVersionFormat": { - "enumDescriptions": [ - "An unspecified format version that will default to V2.", - "LogEntry version 2 format.", - "LogEntry version 1 format." - ], - "enum": [ - "VERSION_FORMAT_UNSPECIFIED", - "V2", - "V1" - ], - "description": "Optional. The log entry format to use for this sink's exported log entries. The v2 format is used by default. The v1 format is deprecated and should be used only as part of a migration effort to v2. See Migration to the v2 API.", - "type": "string" - }, - "name": { - "description": "Required. The client-assigned sink identifier, unique within the project. Example: \"my-syslog-errors-to-pubsub\". Sink identifiers are limited to 100 characters and can include only the following characters: upper and lower-case alphanumeric characters, underscores, hyphens, and periods.", - "type": "string" - }, - "includeChildren": { - "description": "Optional. This field presently applies only to sinks in organizations and folders. If true, then logs from children of this entity will also be available to this sink for export. Whether particular log entries from the children are exported depends on the sink's filter expression. For example, if this sink is associated with an organization, then logs from all projects in the organization as well as from the organization itself will be available for export.", - "type": "boolean" - } - } - }, - "WriteLogEntriesRequest": { - "id": "WriteLogEntriesRequest", - "description": "The parameters to WriteLogEntries.", - "type": "object", - "properties": { - "labels": { - "description": "Optional. Default labels that are added to the labels field of all log entries in entries. If a log entry already has a label with the same key as a label in this parameter, then the log entry's label is not changed. See LogEntry.", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "resource": { - "$ref": "MonitoredResource", - "description": "Optional. A default monitored resource object that is assigned to all log entries in entries that do not specify a value for resource. Example:\n{ \"type\": \"gce_instance\",\n \"labels\": {\n \"zone\": \"us-central1-a\", \"instance_id\": \"00000000000000000000\" }}\nSee LogEntry." - }, - "logName": { - "description": "Optional. A default log resource name that is assigned to all log entries in entries that do not specify a value for log_name:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\" or \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". 
For more information about log names, see LogEntry.", - "type": "string" - }, - "entries": { - "description": "Required. The log entries to write. Values supplied for the fields log_name, resource, and labels in this entries.write request are inserted into those log entries in this list that do not provide their own values.Stackdriver Logging also creates and inserts values for timestamp and insert_id if the entries do not provide them. The created insert_id for the N'th entry in this list will be greater than earlier entries and less than later entries. Otherwise, the order of log entries in this list does not matter.To improve throughput and to avoid exceeding the quota limit for calls to entries.write, you should write multiple log entries at once rather than calling this method for each individual log entry.", - "type": "array", - "items": { - "$ref": "LogEntry" - } - }, - "partialSuccess": { - "description": "Optional. Whether valid entries should be written even if some other entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any entry is not written, then the response status is the error associated with one of the failed entries and the response includes error details keyed by the entries' zero-based index in the entries.write method.", - "type": "boolean" - } - } - }, - "ListLogsResponse": { - "id": "ListLogsResponse", - "description": "Result returned from ListLogs.", - "type": "object", - "properties": { - "logNames": { - "description": "A list of log names. For example, \"projects/my-project/syslog\" or \"organizations/123/cloudresourcemanager.googleapis.com%2Factivity\".", - "type": "array", - "items": { - "type": "string" - } - }, - "nextPageToken": { - "description": "If there might be more results than those appearing in this response, then nextPageToken is included. To get the next set of results, call this method again using the value of nextPageToken as pageToken.", - "type": "string" - } - } - }, - "ListSinksResponse": { - "description": "Result returned from ListSinks.", - "type": "object", - "properties": { - "sinks": { - "description": "A list of sinks.", - "type": "array", - "items": { - "$ref": "LogSink" - } - }, - "nextPageToken": { - "description": "If there might be more results than appear in this response, then nextPageToken is included. To get the next set of results, call the same method again using the value of nextPageToken as pageToken.", - "type": "string" - } - }, - "id": "ListSinksResponse" - }, - "HttpRequest": { - "description": "A common proto for logging HTTP requests. Only contains semantics defined by the HTTP specification. Product-specific logging information MUST be defined in a separate message.", - "type": "object", - "properties": { - "cacheFillBytes": { - "description": "The number of HTTP response bytes inserted into cache. Set only when a cache fill was attempted.", - "format": "int64", - "type": "string" - }, - "requestMethod": { - "description": "The request method. 
Examples: \"GET\", \"HEAD\", \"PUT\", \"POST\".", - "type": "string" - }, - "responseSize": { - "description": "The size of the HTTP response message sent back to the client, in bytes, including the response headers and the response body.", - "format": "int64", - "type": "string" - }, - "requestSize": { - "description": "The size of the HTTP request message in bytes, including the request headers and the request body.", - "format": "int64", - "type": "string" - }, - "requestUrl": { - "description": "The scheme (http, https), the host name, the path and the query portion of the URL that was requested. Example: \"http://example.com/some/info?color=red\".", - "type": "string" - }, - "remoteIp": { - "description": "The IP address (IPv4 or IPv6) of the client that issued the HTTP request. Examples: \"192.168.1.1\", \"FE80::0202:B3FF:FE1E:8329\".", - "type": "string" - }, - "serverIp": { - "description": "The IP address (IPv4 or IPv6) of the origin server that the request was sent to.", - "type": "string" - }, - "cacheLookup": { - "description": "Whether or not a cache lookup was attempted.", - "type": "boolean" - }, - "cacheHit": { - "description": "Whether or not an entity was served from cache (with or without validation).", - "type": "boolean" - }, - "cacheValidatedWithOriginServer": { - "description": "Whether or not the response was validated with the origin server before being served from cache. This field is only meaningful if cache_hit is True.", - "type": "boolean" - }, - "status": { - "description": "The response code indicating the status of response. Examples: 200, 404.", - "format": "int32", - "type": "integer" - }, - "referer": { - "description": "The referer URL of the request, as defined in HTTP/1.1 Header Field Definitions (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).", - "type": "string" - }, - "latency": { - "description": "The request processing latency on the server, from the time the request was received until the response was sent.", - "format": "google-duration", - "type": "string" - }, - "userAgent": { - "description": "The user agent sent by the client. Example: \"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET CLR 1.0.3705)\".", - "type": "string" - } - }, - "id": "HttpRequest" - }, - "LabelDescriptor": { - "description": "A description of a label.", - "type": "object", - "properties": { - "key": { - "description": "The label key.", - "type": "string" - }, - "description": { - "description": "A human-readable description for the label.", - "type": "string" - }, - "valueType": { - "description": "The type of data that can be assigned to the label.", - "type": "string", - "enumDescriptions": [ - "A variable-length string. This is the default.", - "Boolean; true or false.", - "A 64-bit signed integer." 
- ], - "enum": [ - "STRING", - "BOOL", - "INT64" - ] - } - }, - "id": "LabelDescriptor" - } - }, - "icons": { - "x32": "http://www.google.com/images/icons/product/search-32.gif", - "x16": "http://www.google.com/images/icons/product/search-16.gif" - }, - "protocol": "rest", - "canonicalName": "Logging", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform.read-only": { - "description": "View your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/logging.admin": { - "description": "Administrate log data for your projects" - }, - "https://www.googleapis.com/auth/logging.read": { - "description": "View log data for your projects" - }, - "https://www.googleapis.com/auth/logging.write": { - "description": "Submit log data for your projects" - }, - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - } - } - } - }, - "rootUrl": "https://logging.googleapis.com/", - "ownerDomain": "google.com", - "name": "logging", - "batchPath": "batch" -} diff --git a/vendor/google.golang.org/api/logging/v2beta1/logging-gen.go b/vendor/google.golang.org/api/logging/v2beta1/logging-gen.go deleted file mode 100644 index 2ecbb6d931d..00000000000 --- a/vendor/google.golang.org/api/logging/v2beta1/logging-gen.go +++ /dev/null @@ -1,4414 +0,0 @@ -// Package logging provides access to the Stackdriver Logging API. -// -// See https://cloud.google.com/logging/docs/ -// -// Usage example: -// -// import "google.golang.org/api/logging/v2beta1" -// ... -// loggingService, err := logging.New(oauthHttpClient) -package logging // import "google.golang.org/api/logging/v2beta1" - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - context "golang.org/x/net/context" - ctxhttp "golang.org/x/net/context/ctxhttp" - gensupport "google.golang.org/api/gensupport" - googleapi "google.golang.org/api/googleapi" - "io" - "net/http" - "net/url" - "strconv" - "strings" -) - -// Always reference these packages, just in case the auto-generated code -// below doesn't. -var _ = bytes.NewBuffer -var _ = strconv.Itoa -var _ = fmt.Sprintf -var _ = json.NewDecoder -var _ = io.Copy -var _ = url.Parse -var _ = gensupport.MarshalJSON -var _ = googleapi.Version -var _ = errors.New -var _ = strings.Replace -var _ = context.Canceled -var _ = ctxhttp.Do - -const apiId = "logging:v2beta1" -const apiName = "logging" -const apiVersion = "v2beta1" -const basePath = "https://logging.googleapis.com/" - -// OAuth2 scopes used by this API. 
-const ( - // View and manage your data across Google Cloud Platform services - CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" - - // View your data across Google Cloud Platform services - CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only" - - // Administrate log data for your projects - LoggingAdminScope = "https://www.googleapis.com/auth/logging.admin" - - // View log data for your projects - LoggingReadScope = "https://www.googleapis.com/auth/logging.read" - - // Submit log data for your projects - LoggingWriteScope = "https://www.googleapis.com/auth/logging.write" -) - -func New(client *http.Client) (*Service, error) { - if client == nil { - return nil, errors.New("client is nil") - } - s := &Service{client: client, BasePath: basePath} - s.BillingAccounts = NewBillingAccountsService(s) - s.Entries = NewEntriesService(s) - s.MonitoredResourceDescriptors = NewMonitoredResourceDescriptorsService(s) - s.Organizations = NewOrganizationsService(s) - s.Projects = NewProjectsService(s) - return s, nil -} - -type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment - - BillingAccounts *BillingAccountsService - - Entries *EntriesService - - MonitoredResourceDescriptors *MonitoredResourceDescriptorsService - - Organizations *OrganizationsService - - Projects *ProjectsService -} - -func (s *Service) userAgent() string { - if s.UserAgent == "" { - return googleapi.UserAgent - } - return googleapi.UserAgent + " " + s.UserAgent -} - -func NewBillingAccountsService(s *Service) *BillingAccountsService { - rs := &BillingAccountsService{s: s} - rs.Logs = NewBillingAccountsLogsService(s) - return rs -} - -type BillingAccountsService struct { - s *Service - - Logs *BillingAccountsLogsService -} - -func NewBillingAccountsLogsService(s *Service) *BillingAccountsLogsService { - rs := &BillingAccountsLogsService{s: s} - return rs -} - -type BillingAccountsLogsService struct { - s *Service -} - -func NewEntriesService(s *Service) *EntriesService { - rs := &EntriesService{s: s} - return rs -} - -type EntriesService struct { - s *Service -} - -func NewMonitoredResourceDescriptorsService(s *Service) *MonitoredResourceDescriptorsService { - rs := &MonitoredResourceDescriptorsService{s: s} - return rs -} - -type MonitoredResourceDescriptorsService struct { - s *Service -} - -func NewOrganizationsService(s *Service) *OrganizationsService { - rs := &OrganizationsService{s: s} - rs.Logs = NewOrganizationsLogsService(s) - return rs -} - -type OrganizationsService struct { - s *Service - - Logs *OrganizationsLogsService -} - -func NewOrganizationsLogsService(s *Service) *OrganizationsLogsService { - rs := &OrganizationsLogsService{s: s} - return rs -} - -type OrganizationsLogsService struct { - s *Service -} - -func NewProjectsService(s *Service) *ProjectsService { - rs := &ProjectsService{s: s} - rs.Logs = NewProjectsLogsService(s) - rs.Metrics = NewProjectsMetricsService(s) - rs.Sinks = NewProjectsSinksService(s) - return rs -} - -type ProjectsService struct { - s *Service - - Logs *ProjectsLogsService - - Metrics *ProjectsMetricsService - - Sinks *ProjectsSinksService -} - -func NewProjectsLogsService(s *Service) *ProjectsLogsService { - rs := &ProjectsLogsService{s: s} - return rs -} - -type ProjectsLogsService struct { - s *Service -} - -func NewProjectsMetricsService(s *Service) *ProjectsMetricsService { - rs := &ProjectsMetricsService{s: s} - return rs -} - -type 
ProjectsMetricsService struct { - s *Service -} - -func NewProjectsSinksService(s *Service) *ProjectsSinksService { - rs := &ProjectsSinksService{s: s} - return rs -} - -type ProjectsSinksService struct { - s *Service -} - -// Empty: A generic empty message that you can re-use to avoid defining -// duplicated empty messages in your APIs. A typical example is to use -// it as the request or the response type of an API method. For -// instance: -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// The JSON representation for Empty is empty JSON object {}. -type Empty struct { - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` -} - -// HttpRequest: A common proto for logging HTTP requests. Only contains -// semantics defined by the HTTP specification. Product-specific logging -// information MUST be defined in a separate message. -type HttpRequest struct { - // CacheFillBytes: The number of HTTP response bytes inserted into - // cache. Set only when a cache fill was attempted. - CacheFillBytes int64 `json:"cacheFillBytes,omitempty,string"` - - // CacheHit: Whether or not an entity was served from cache (with or - // without validation). - CacheHit bool `json:"cacheHit,omitempty"` - - // CacheLookup: Whether or not a cache lookup was attempted. - CacheLookup bool `json:"cacheLookup,omitempty"` - - // CacheValidatedWithOriginServer: Whether or not the response was - // validated with the origin server before being served from cache. This - // field is only meaningful if cache_hit is True. - CacheValidatedWithOriginServer bool `json:"cacheValidatedWithOriginServer,omitempty"` - - // Latency: The request processing latency on the server, from the time - // the request was received until the response was sent. - Latency string `json:"latency,omitempty"` - - // Referer: The referer URL of the request, as defined in HTTP/1.1 - // Header Field Definitions - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html). - Referer string `json:"referer,omitempty"` - - // RemoteIp: The IP address (IPv4 or IPv6) of the client that issued the - // HTTP request. Examples: "192.168.1.1", "FE80::0202:B3FF:FE1E:8329". - RemoteIp string `json:"remoteIp,omitempty"` - - // RequestMethod: The request method. Examples: "GET", "HEAD", "PUT", - // "POST". - RequestMethod string `json:"requestMethod,omitempty"` - - // RequestSize: The size of the HTTP request message in bytes, including - // the request headers and the request body. - RequestSize int64 `json:"requestSize,omitempty,string"` - - // RequestUrl: The scheme (http, https), the host name, the path and the - // query portion of the URL that was requested. Example: - // "http://example.com/some/info?color=red". - RequestUrl string `json:"requestUrl,omitempty"` - - // ResponseSize: The size of the HTTP response message sent back to the - // client, in bytes, including the response headers and the response - // body. - ResponseSize int64 `json:"responseSize,omitempty,string"` - - // ServerIp: The IP address (IPv4 or IPv6) of the origin server that the - // request was sent to. - ServerIp string `json:"serverIp,omitempty"` - - // Status: The response code indicating the status of response. - // Examples: 200, 404. - Status int64 `json:"status,omitempty"` - - // UserAgent: The user agent sent by the client. Example: "Mozilla/4.0 - // (compatible; MSIE 6.0; Windows 98; Q312461; .NET CLR 1.0.3705)". 
- UserAgent string `json:"userAgent,omitempty"` - - // ForceSendFields is a list of field names (e.g. "CacheFillBytes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CacheFillBytes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *HttpRequest) MarshalJSON() ([]byte, error) { - type noMethod HttpRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// LabelDescriptor: A description of a label. -type LabelDescriptor struct { - // Description: A human-readable description for the label. - Description string `json:"description,omitempty"` - - // Key: The label key. - Key string `json:"key,omitempty"` - - // ValueType: The type of data that can be assigned to the label. - // - // Possible values: - // "STRING" - A variable-length string. This is the default. - // "BOOL" - Boolean; true or false. - // "INT64" - A 64-bit signed integer. - ValueType string `json:"valueType,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *LabelDescriptor) MarshalJSON() ([]byte, error) { - type noMethod LabelDescriptor - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListLogEntriesRequest: The parameters to ListLogEntries. -type ListLogEntriesRequest struct { - // Filter: Optional. A filter that chooses which log entries to return. - // See Advanced Logs Filters. Only log entries that match the filter are - // returned. An empty filter matches all log entries in the resources - // listed in resource_names. Referencing a parent resource that is not - // listed in resource_names will cause the filter to return no results. - // The maximum length of the filter is 20000 characters. - Filter string `json:"filter,omitempty"` - - // OrderBy: Optional. How the results should be sorted. Presently, the - // only permitted values are "timestamp asc" (default) and "timestamp - // desc". 
The first option returns entries in order of increasing values - // of LogEntry.timestamp (oldest first), and the second option returns - // entries in order of decreasing timestamps (newest first). Entries - // with equal timestamps are returned in order of their insert_id - // values. - OrderBy string `json:"orderBy,omitempty"` - - // PageSize: Optional. The maximum number of results to return from this - // request. Non-positive values are ignored. The presence of - // next_page_token in the response indicates that more results might be - // available. - PageSize int64 `json:"pageSize,omitempty"` - - // PageToken: Optional. If present, then retrieve the next batch of - // results from the preceding call to this method. page_token must be - // the value of next_page_token from the previous response. The values - // of other method parameters should be identical to those in the - // previous call. - PageToken string `json:"pageToken,omitempty"` - - // ProjectIds: Deprecated. Use resource_names instead. One or more - // project identifiers or project numbers from which to retrieve log - // entries. Example: "my-project-1A". If present, these project - // identifiers are converted to resource name format and added to the - // list of resources in resource_names. - ProjectIds []string `json:"projectIds,omitempty"` - - // ResourceNames: Required. Names of one or more parent resources from - // which to retrieve log - // entries: - // "projects/[PROJECT_ID]" - // "organizations/[ORGANIZATION_ID]" - // "bi - // llingAccounts/[BILLING_ACCOUNT_ID]" - // "folders/[FOLDER_ID]" - // Projects listed in the project_ids field are added to this list. - ResourceNames []string `json:"resourceNames,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Filter") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Filter") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListLogEntriesRequest) MarshalJSON() ([]byte, error) { - type noMethod ListLogEntriesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListLogEntriesResponse: Result returned from ListLogEntries. -type ListLogEntriesResponse struct { - // Entries: A list of log entries. - Entries []*LogEntry `json:"entries,omitempty"` - - // NextPageToken: If there might be more results than those appearing in - // this response, then nextPageToken is included. To get the next set of - // results, call this method again using the value of nextPageToken as - // pageToken.If a value for next_page_token appears and the entries - // field is empty, it means that the search found no log entries so far - // but it did not have time to search all the possible log entries. - // Retry the method with this value for page_token to continue the - // search. 
Alternatively, consider speeding up the search by changing - // your filter to specify a single log name or resource type, or to - // narrow the time range of the search. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Entries") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Entries") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListLogEntriesResponse) MarshalJSON() ([]byte, error) { - type noMethod ListLogEntriesResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListLogMetricsResponse: Result returned from ListLogMetrics. -type ListLogMetricsResponse struct { - // Metrics: A list of logs-based metrics. - Metrics []*LogMetric `json:"metrics,omitempty"` - - // NextPageToken: If there might be more results than appear in this - // response, then nextPageToken is included. To get the next set of - // results, call this method again using the value of nextPageToken as - // pageToken. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Metrics") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Metrics") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListLogMetricsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListLogMetricsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListLogsResponse: Result returned from ListLogs. -type ListLogsResponse struct { - // LogNames: A list of log names. For example, - // "projects/my-project/syslog" or - // "organizations/123/cloudresourcemanager.googleapis.com%2Factivity". 
- LogNames []string `json:"logNames,omitempty"` - - // NextPageToken: If there might be more results than those appearing in - // this response, then nextPageToken is included. To get the next set of - // results, call this method again using the value of nextPageToken as - // pageToken. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "LogNames") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "LogNames") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListLogsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListLogsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListMonitoredResourceDescriptorsResponse: Result returned from -// ListMonitoredResourceDescriptors. -type ListMonitoredResourceDescriptorsResponse struct { - // NextPageToken: If there might be more results than those appearing in - // this response, then nextPageToken is included. To get the next set of - // results, call this method again using the value of nextPageToken as - // pageToken. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ResourceDescriptors: A list of resource descriptors. - ResourceDescriptors []*MonitoredResourceDescriptor `json:"resourceDescriptors,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "NextPageToken") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "NextPageToken") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListMonitoredResourceDescriptorsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListMonitoredResourceDescriptorsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListSinksResponse: Result returned from ListSinks. 
-type ListSinksResponse struct { - // NextPageToken: If there might be more results than appear in this - // response, then nextPageToken is included. To get the next set of - // results, call the same method again using the value of nextPageToken - // as pageToken. - NextPageToken string `json:"nextPageToken,omitempty"` - - // Sinks: A list of sinks. - Sinks []*LogSink `json:"sinks,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "NextPageToken") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "NextPageToken") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListSinksResponse) MarshalJSON() ([]byte, error) { - type noMethod ListSinksResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// LogEntry: An individual entry in a log. -type LogEntry struct { - // HttpRequest: Optional. Information about the HTTP request associated - // with this log entry, if applicable. - HttpRequest *HttpRequest `json:"httpRequest,omitempty"` - - // InsertId: Optional. A unique identifier for the log entry. If you - // provide a value, then Stackdriver Logging considers other log entries - // in the same project, with the same timestamp, and with the same - // insert_id to be duplicates which can be removed. If omitted in new - // log entries, then Stackdriver Logging will insert its own unique - // identifier. The insert_id is used to order log entries that have the - // same timestamp value. - InsertId string `json:"insertId,omitempty"` - - // JsonPayload: The log entry payload, represented as a structure that - // is expressed as a JSON object. - JsonPayload googleapi.RawMessage `json:"jsonPayload,omitempty"` - - // Labels: Optional. A set of user-defined (key, value) data that - // provides additional information about the log entry. - Labels map[string]string `json:"labels,omitempty"` - - // LogName: Required. The resource name of the log to which this log - // entry - // belongs: - // "projects/[PROJECT_ID]/logs/[LOG_ID]" - // "organizations/[ORGANIZ - // ATION_ID]/logs/[LOG_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[L - // OG_ID]" - // "folders/[FOLDER_ID]/logs/[LOG_ID]" - // [LOG_ID] must be URL-encoded within log_name. Example: - // "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Fa - // ctivity". 
[LOG_ID] must be less than 512 characters long and can only - // include the following characters: upper and lower case alphanumeric - // characters, forward-slash, underscore, hyphen, and period.For - // backward compatibility, if log_name begins with a forward-slash, such - // as /projects/..., then the log entry is ingested as usual but the - // forward-slash is removed. Listing the log entry will not show the - // leading slash and filtering for a log name with a leading slash will - // never return any results. - LogName string `json:"logName,omitempty"` - - // Operation: Optional. Information about an operation associated with - // the log entry, if applicable. - Operation *LogEntryOperation `json:"operation,omitempty"` - - // ProtoPayload: The log entry payload, represented as a protocol - // buffer. Some Google Cloud Platform services use this field for their - // log entry payloads. - ProtoPayload googleapi.RawMessage `json:"protoPayload,omitempty"` - - // Resource: Required. The monitored resource associated with this log - // entry. Example: a log entry that reports a database error would be - // associated with the monitored resource designating the particular - // database that reported the error. - Resource *MonitoredResource `json:"resource,omitempty"` - - // Severity: Optional. The severity of the log entry. The default value - // is LogSeverity.DEFAULT. - // - // Possible values: - // "DEFAULT" - (0) The log entry has no assigned severity level. - // "DEBUG" - (100) Debug or trace information. - // "INFO" - (200) Routine information, such as ongoing status or - // performance. - // "NOTICE" - (300) Normal but significant events, such as start up, - // shut down, or a configuration change. - // "WARNING" - (400) Warning events might cause problems. - // "ERROR" - (500) Error events are likely to cause problems. - // "CRITICAL" - (600) Critical events cause more severe problems or - // outages. - // "ALERT" - (700) A person must take an action immediately. - // "EMERGENCY" - (800) One or more systems are unusable. - Severity string `json:"severity,omitempty"` - - // SourceLocation: Optional. Source code location information associated - // with the log entry, if any. - SourceLocation *LogEntrySourceLocation `json:"sourceLocation,omitempty"` - - // TextPayload: The log entry payload, represented as a Unicode string - // (UTF-8). - TextPayload string `json:"textPayload,omitempty"` - - // Timestamp: Optional. The time the event described by the log entry - // occurred. If omitted in a new log entry, Stackdriver Logging will - // insert the time the log entry is received. Stackdriver Logging might - // reject log entries whose time stamps are more than a couple of hours - // in the future. Log entries with time stamps in the past are accepted. - Timestamp string `json:"timestamp,omitempty"` - - // Trace: Optional. Resource name of the trace associated with the log - // entry, if any. If it contains a relative resource name, the name is - // assumed to be relative to //tracing.googleapis.com. Example: - // projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824 - Trace string `json:"trace,omitempty"` - - // ForceSendFields is a list of field names (e.g. "HttpRequest") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "HttpRequest") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *LogEntry) MarshalJSON() ([]byte, error) { - type noMethod LogEntry - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// LogEntryOperation: Additional information about a potentially -// long-running operation with which a log entry is associated. -type LogEntryOperation struct { - // First: Optional. Set this to True if this is the first log entry in - // the operation. - First bool `json:"first,omitempty"` - - // Id: Optional. An arbitrary operation identifier. Log entries with the - // same identifier are assumed to be part of the same operation. - Id string `json:"id,omitempty"` - - // Last: Optional. Set this to True if this is the last log entry in the - // operation. - Last bool `json:"last,omitempty"` - - // Producer: Optional. An arbitrary producer identifier. The combination - // of id and producer must be globally unique. Examples for producer: - // "MyDivision.MyBigCompany.com", "github.com/MyProject/MyApplication". - Producer string `json:"producer,omitempty"` - - // ForceSendFields is a list of field names (e.g. "First") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "First") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *LogEntryOperation) MarshalJSON() ([]byte, error) { - type noMethod LogEntryOperation - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// LogEntrySourceLocation: Additional information about the source code -// location that produced the log entry. -type LogEntrySourceLocation struct { - // File: Optional. Source file name. Depending on the runtime - // environment, this might be a simple name or a fully-qualified name. - File string `json:"file,omitempty"` - - // Function: Optional. Human-readable name of the function or method - // being invoked, with optional context such as the class or package - // name. This information may be used in contexts such as the logs - // viewer, where a file and line number are less meaningful. The format - // can vary by language. For example: qual.if.ied.Class.method (Java), - // dir/package.func (Go), function (Python). - Function string `json:"function,omitempty"` - - // Line: Optional. Line within the source file. 
1-based; 0 indicates no - // line number available. - Line int64 `json:"line,omitempty,string"` - - // ForceSendFields is a list of field names (e.g. "File") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "File") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *LogEntrySourceLocation) MarshalJSON() ([]byte, error) { - type noMethod LogEntrySourceLocation - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// LogLine: Application log line emitted while processing a request. -type LogLine struct { - // LogMessage: App-provided log message. - LogMessage string `json:"logMessage,omitempty"` - - // Severity: Severity of this log entry. - // - // Possible values: - // "DEFAULT" - (0) The log entry has no assigned severity level. - // "DEBUG" - (100) Debug or trace information. - // "INFO" - (200) Routine information, such as ongoing status or - // performance. - // "NOTICE" - (300) Normal but significant events, such as start up, - // shut down, or a configuration change. - // "WARNING" - (400) Warning events might cause problems. - // "ERROR" - (500) Error events are likely to cause problems. - // "CRITICAL" - (600) Critical events cause more severe problems or - // outages. - // "ALERT" - (700) A person must take an action immediately. - // "EMERGENCY" - (800) One or more systems are unusable. - Severity string `json:"severity,omitempty"` - - // SourceLocation: Where in the source code this log message was - // written. - SourceLocation *SourceLocation `json:"sourceLocation,omitempty"` - - // Time: Approximate time when this log entry was made. - Time string `json:"time,omitempty"` - - // ForceSendFields is a list of field names (e.g. "LogMessage") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "LogMessage") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *LogLine) MarshalJSON() ([]byte, error) { - type noMethod LogLine - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// LogMetric: Describes a logs-based metric. 
The value of the metric is -// the number of log entries that match a logs filter in a given time -// interval. -type LogMetric struct { - // Description: Optional. A description of this metric, which is used in - // documentation. - Description string `json:"description,omitempty"` - - // Filter: Required. An advanced logs filter which is used to match log - // entries. Example: - // "resource.type=gae_app AND severity>=ERROR" - // The maximum length of the filter is 20000 characters. - Filter string `json:"filter,omitempty"` - - // Name: Required. The client-assigned metric identifier. Examples: - // "error_count", "nginx/requests".Metric identifiers are limited to 100 - // characters and can include only the following characters: A-Z, a-z, - // 0-9, and the special characters _-.,+!*',()%/. The forward-slash - // character (/) denotes a hierarchy of name pieces, and it cannot be - // the first character of the name.The metric identifier in this field - // must not be URL-encoded - // (https://en.wikipedia.org/wiki/Percent-encoding). However, when the - // metric identifier appears as the [METRIC_ID] part of a metric_name - // API parameter, then the metric identifier must be URL-encoded. - // Example: "projects/my-project/metrics/nginx%2Frequests". - Name string `json:"name,omitempty"` - - // Version: Output only. The API version that created or updated this - // metric. The version also dictates the syntax of the filter - // expression. When a value for this field is missing, the default value - // of V2 should be assumed. - // - // Possible values: - // "V2" - Stackdriver Logging API v2. - // "V1" - Stackdriver Logging API v1. - Version string `json:"version,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *LogMetric) MarshalJSON() ([]byte, error) { - type noMethod LogMetric - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// LogSink: Describes a sink used to export log entries to one of the -// following destinations in any project: a Cloud Storage bucket, a -// BigQuery dataset, or a Cloud Pub/Sub topic. A logs filter controls -// which log entries are exported. The sink must be created within a -// project, organization, billing account, or folder. -type LogSink struct { - // Destination: Required. 
The export - // destination: - // "storage.googleapis.com/[GCS_BUCKET]" - // "bigquery.googleapi - // s.com/projects/[PROJECT_ID]/datasets/[DATASET]" - // "pubsub.googleapis.com - // /projects/[PROJECT_ID]/topics/[TOPIC_ID]" - // The sink's writer_identity, set when the sink is created, must have - // permission to write to the destination or else the log entries are - // not exported. For more information, see Exporting Logs With Sinks. - Destination string `json:"destination,omitempty"` - - // EndTime: Optional. The time at which this sink will stop exporting - // log entries. Log entries are exported only if their timestamp is - // earlier than the end time. If this field is not supplied, there is no - // end time. If both a start time and an end time are provided, then the - // end time must be later than the start time. - EndTime string `json:"endTime,omitempty"` - - // Filter: Optional. An advanced logs filter. The only exported log - // entries are those that are in the resource owning the sink and that - // match the filter. The filter must use the log entry format specified - // by the output_version_format parameter. For example, in the v2 - // format: - // logName="projects/[PROJECT_ID]/logs/[LOG_ID]" AND severity>=ERROR - // - Filter string `json:"filter,omitempty"` - - // IncludeChildren: Optional. This field presently applies only to sinks - // in organizations and folders. If true, then logs from children of - // this entity will also be available to this sink for export. Whether - // particular log entries from the children are exported depends on the - // sink's filter expression. For example, if this sink is associated - // with an organization, then logs from all projects in the organization - // as well as from the organization itself will be available for export. - IncludeChildren bool `json:"includeChildren,omitempty"` - - // Name: Required. The client-assigned sink identifier, unique within - // the project. Example: "my-syslog-errors-to-pubsub". Sink identifiers - // are limited to 100 characters and can include only the following - // characters: upper and lower-case alphanumeric characters, - // underscores, hyphens, and periods. - Name string `json:"name,omitempty"` - - // OutputVersionFormat: Optional. The log entry format to use for this - // sink's exported log entries. The v2 format is used by default. The v1 - // format is deprecated and should be used only as part of a migration - // effort to v2. See Migration to the v2 API. - // - // Possible values: - // "VERSION_FORMAT_UNSPECIFIED" - An unspecified format version that - // will default to V2. - // "V2" - LogEntry version 2 format. - // "V1" - LogEntry version 1 format. - OutputVersionFormat string `json:"outputVersionFormat,omitempty"` - - // StartTime: Optional. The time at which this sink will begin exporting - // log entries. Log entries are exported only if their timestamp is not - // earlier than the start time. The default value of this field is the - // time the sink is created or updated. - StartTime string `json:"startTime,omitempty"` - - // WriterIdentity: Output only. An IAM identity—a service account - // or group—under which Stackdriver Logging writes the exported - // log entries to the sink's destination. This field is set by - // sinks.create and sinks.update, based on the setting of - // unique_writer_identity in those methods.Until you grant this identity - // write-access to the destination, log entry exports from this sink - // will fail. 
For more information, see Granting access for a resource. - // Consult the destination service's documentation to determine the - // appropriate IAM roles to assign to the identity. - WriterIdentity string `json:"writerIdentity,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Destination") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Destination") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *LogSink) MarshalJSON() ([]byte, error) { - type noMethod LogSink - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// MonitoredResource: An object representing a resource that can be used -// for monitoring, logging, billing, or other purposes. Examples include -// virtual machine instances, databases, and storage devices such as -// disks. The type field identifies a MonitoredResourceDescriptor object -// that describes the resource's schema. Information in the labels field -// identifies the actual resource and its attributes according to the -// schema. For example, a particular Compute Engine VM instance could be -// represented by the following object, because the -// MonitoredResourceDescriptor for "gce_instance" has labels -// "instance_id" and "zone": -// { "type": "gce_instance", -// "labels": { "instance_id": "12345678901234", -// "zone": "us-central1-a" }} -// -type MonitoredResource struct { - // Labels: Required. Values for all of the labels listed in the - // associated monitored resource descriptor. For example, Cloud SQL - // databases use the labels "database_id" and "zone". - Labels map[string]string `json:"labels,omitempty"` - - // Type: Required. The monitored resource type. This field must match - // the type field of a MonitoredResourceDescriptor object. For example, - // the type of a Cloud SQL database is "cloudsql_database". - Type string `json:"type,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Labels") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Labels") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. 
It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *MonitoredResource) MarshalJSON() ([]byte, error) { - type noMethod MonitoredResource - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// MonitoredResourceDescriptor: An object that describes the schema of a -// MonitoredResource object using a type name and a set of labels. For -// example, the monitored resource descriptor for Google Compute Engine -// VM instances has a type of "gce_instance" and specifies the use of -// the labels "instance_id" and "zone" to identify particular VM -// instances.Different APIs can support different monitored resource -// types. APIs generally provide a list method that returns the -// monitored resource descriptors used by the API. -type MonitoredResourceDescriptor struct { - // Description: Optional. A detailed description of the monitored - // resource type that might be used in documentation. - Description string `json:"description,omitempty"` - - // DisplayName: Optional. A concise name for the monitored resource type - // that might be displayed in user interfaces. It should be a Title - // Cased Noun Phrase, without any article or other determiners. For - // example, "Google Cloud SQL Database". - DisplayName string `json:"displayName,omitempty"` - - // Labels: Required. A set of labels used to describe instances of this - // monitored resource type. For example, an individual Google Cloud SQL - // database is identified by values for the labels "database_id" and - // "zone". - Labels []*LabelDescriptor `json:"labels,omitempty"` - - // Name: Optional. The resource name of the monitored resource - // descriptor: - // "projects/{project_id}/monitoredResourceDescriptors/{type}" where - // {type} is the value of the type field in this object and {project_id} - // is a project ID that provides API-specific context for accessing the - // type. APIs that do not use project information can use the resource - // name format "monitoredResourceDescriptors/{type}". - Name string `json:"name,omitempty"` - - // Type: Required. The monitored resource type. For example, the type - // "cloudsql_database" represents databases in Google Cloud SQL. The - // maximum length of this value is 256 characters. - Type string `json:"type,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) { - type noMethod MonitoredResourceDescriptor - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RequestLog: Complete log information about a single HTTP request to -// an App Engine application. -type RequestLog struct { - // AppEngineRelease: App Engine release version. - AppEngineRelease string `json:"appEngineRelease,omitempty"` - - // AppId: Application that handled this request. - AppId string `json:"appId,omitempty"` - - // Cost: An indication of the relative cost of serving this request. - Cost float64 `json:"cost,omitempty"` - - // EndTime: Time when the request finished. - EndTime string `json:"endTime,omitempty"` - - // Finished: Whether this request is finished or active. - Finished bool `json:"finished,omitempty"` - - // First: Whether this is the first RequestLog entry for this request. - // If an active request has several RequestLog entries written to - // Stackdriver Logging, then this field will be set for one of them. - First bool `json:"first,omitempty"` - - // Host: Internet host and port number of the resource being requested. - Host string `json:"host,omitempty"` - - // HttpVersion: HTTP version of request. Example: "HTTP/1.1". - HttpVersion string `json:"httpVersion,omitempty"` - - // InstanceId: An identifier for the instance that handled the request. - InstanceId string `json:"instanceId,omitempty"` - - // InstanceIndex: If the instance processing this request belongs to a - // manually scaled module, then this is the 0-based index of the - // instance. Otherwise, this value is -1. - InstanceIndex int64 `json:"instanceIndex,omitempty"` - - // Ip: Origin IP address. - Ip string `json:"ip,omitempty"` - - // Latency: Latency of the request. - Latency string `json:"latency,omitempty"` - - // Line: A list of log lines emitted by the application while serving - // this request. - Line []*LogLine `json:"line,omitempty"` - - // MegaCycles: Number of CPU megacycles used to process request. - MegaCycles int64 `json:"megaCycles,omitempty,string"` - - // Method: Request method. Example: "GET", "HEAD", "PUT", "POST", - // "DELETE". - Method string `json:"method,omitempty"` - - // ModuleId: Module of the application that handled this request. - ModuleId string `json:"moduleId,omitempty"` - - // Nickname: The logged-in user who made the request.Most likely, this - // is the part of the user's email before the @ sign. The field value is - // the same for different requests from the same user, but different - // users can have similar names. This information is also available to - // the application via the App Engine Users API.This field will be - // populated starting with App Engine 1.9.21. - Nickname string `json:"nickname,omitempty"` - - // PendingTime: Time this request spent in the pending request queue. - PendingTime string `json:"pendingTime,omitempty"` - - // Referrer: Referrer URL of request. - Referrer string `json:"referrer,omitempty"` - - // RequestId: Globally unique identifier for a request, which is based - // on the request start time. Request IDs for requests which started - // later will compare greater as strings than those for requests which - // started earlier. - RequestId string `json:"requestId,omitempty"` - - // Resource: Contains the path and query portion of the URL that was - // requested. 
For example, if the URL was - // "http://example.com/app?name=val", the resource would be - // "/app?name=val". The fragment identifier, which is identified by the - // # character, is not included. - Resource string `json:"resource,omitempty"` - - // ResponseSize: Size in bytes sent back to client by request. - ResponseSize int64 `json:"responseSize,omitempty,string"` - - // SourceReference: Source code for the application that handled this - // request. There can be more than one source reference per deployed - // application if source code is distributed among multiple - // repositories. - SourceReference []*SourceReference `json:"sourceReference,omitempty"` - - // StartTime: Time when the request started. - StartTime string `json:"startTime,omitempty"` - - // Status: HTTP response status code. Example: 200, 404. - Status int64 `json:"status,omitempty"` - - // TaskName: Task name of the request, in the case of an offline - // request. - TaskName string `json:"taskName,omitempty"` - - // TaskQueueName: Queue name of the request, in the case of an offline - // request. - TaskQueueName string `json:"taskQueueName,omitempty"` - - // TraceId: Stackdriver Trace identifier for this request. - TraceId string `json:"traceId,omitempty"` - - // UrlMapEntry: File or class that handled the request. - UrlMapEntry string `json:"urlMapEntry,omitempty"` - - // UserAgent: User agent that made the request. - UserAgent string `json:"userAgent,omitempty"` - - // VersionId: Version of the application that handled this request. - VersionId string `json:"versionId,omitempty"` - - // WasLoadingRequest: Whether this was a loading request for the - // instance. - WasLoadingRequest bool `json:"wasLoadingRequest,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AppEngineRelease") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AppEngineRelease") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *RequestLog) MarshalJSON() ([]byte, error) { - type noMethod RequestLog - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *RequestLog) UnmarshalJSON(data []byte) error { - type noMethod RequestLog - var s1 struct { - Cost gensupport.JSONFloat64 `json:"cost"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.Cost = float64(s1.Cost) - return nil -} - -// SourceLocation: Specifies a location in a source code file. -type SourceLocation struct { - // File: Source file name. Depending on the runtime environment, this - // might be a simple name or a fully-qualified name. - File string `json:"file,omitempty"` - - // FunctionName: Human-readable name of the function or method being - // invoked, with optional context such as the class or package name. 
- // This information is used in contexts such as the logs viewer, where a - // file and line number are less meaningful. The format can vary by - // language. For example: qual.if.ied.Class.method (Java), - // dir/package.func (Go), function (Python). - FunctionName string `json:"functionName,omitempty"` - - // Line: Line within the source file. - Line int64 `json:"line,omitempty,string"` - - // ForceSendFields is a list of field names (e.g. "File") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "File") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SourceLocation) MarshalJSON() ([]byte, error) { - type noMethod SourceLocation - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SourceReference: A reference to a particular snapshot of the source -// tree used to build and deploy an application. -type SourceReference struct { - // Repository: Optional. A URI string identifying the repository. - // Example: "https://github.com/GoogleCloudPlatform/kubernetes.git" - Repository string `json:"repository,omitempty"` - - // RevisionId: The canonical and persistent identifier of the deployed - // revision. Example (git): "0035781c50ec7aa23385dc841529ce8a4b70db1b" - RevisionId string `json:"revisionId,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Repository") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Repository") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SourceReference) MarshalJSON() ([]byte, error) { - type noMethod SourceReference - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// WriteLogEntriesRequest: The parameters to WriteLogEntries. -type WriteLogEntriesRequest struct { - // Entries: Required. The log entries to write. 
Values supplied for the - // fields log_name, resource, and labels in this entries.write request - // are inserted into those log entries in this list that do not provide - // their own values.Stackdriver Logging also creates and inserts values - // for timestamp and insert_id if the entries do not provide them. The - // created insert_id for the N'th entry in this list will be greater - // than earlier entries and less than later entries. Otherwise, the - // order of log entries in this list does not matter.To improve - // throughput and to avoid exceeding the quota limit for calls to - // entries.write, you should write multiple log entries at once rather - // than calling this method for each individual log entry. - Entries []*LogEntry `json:"entries,omitempty"` - - // Labels: Optional. Default labels that are added to the labels field - // of all log entries in entries. If a log entry already has a label - // with the same key as a label in this parameter, then the log entry's - // label is not changed. See LogEntry. - Labels map[string]string `json:"labels,omitempty"` - - // LogName: Optional. A default log resource name that is assigned to - // all log entries in entries that do not specify a value for - // log_name: - // "projects/[PROJECT_ID]/logs/[LOG_ID]" - // "organizations/[ORGANI - // ZATION_ID]/logs/[LOG_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[ - // LOG_ID]" - // "folders/[FOLDER_ID]/logs/[LOG_ID]" - // [LOG_ID] must be URL-encoded. For example, - // "projects/my-project-id/logs/syslog" or - // "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Fa - // ctivity". For more information about log names, see LogEntry. - LogName string `json:"logName,omitempty"` - - // PartialSuccess: Optional. Whether valid entries should be written - // even if some other entries fail due to INVALID_ARGUMENT or - // PERMISSION_DENIED errors. If any entry is not written, then the - // response status is the error associated with one of the failed - // entries and the response includes error details keyed by the entries' - // zero-based index in the entries.write method. - PartialSuccess bool `json:"partialSuccess,omitempty"` - - // Resource: Optional. A default monitored resource object that is - // assigned to all log entries in entries that do not specify a value - // for resource. Example: - // { "type": "gce_instance", - // "labels": { - // "zone": "us-central1-a", "instance_id": "00000000000000000000" - // }} - // See LogEntry. - Resource *MonitoredResource `json:"resource,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Entries") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Entries") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *WriteLogEntriesRequest) MarshalJSON() ([]byte, error) { - type noMethod WriteLogEntriesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// WriteLogEntriesResponse: Result returned from WriteLogEntries. empty -type WriteLogEntriesResponse struct { - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` -} - -// method id "logging.billingAccounts.logs.delete": - -type BillingAccountsLogsDeleteCall struct { - s *Service - logName string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes all the log entries in a log. The log reappears if it -// receives new entries. Log entries written shortly before the delete -// operation might not be deleted. -func (r *BillingAccountsLogsService) Delete(logName string) *BillingAccountsLogsDeleteCall { - c := &BillingAccountsLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.logName = logName - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BillingAccountsLogsDeleteCall) Fields(s ...googleapi.Field) *BillingAccountsLogsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BillingAccountsLogsDeleteCall) Context(ctx context.Context) *BillingAccountsLogsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BillingAccountsLogsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BillingAccountsLogsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+logName}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "logName": c.logName, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.billingAccounts.logs.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BillingAccountsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Empty{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.", - // "flatPath": "v2beta1/billingAccounts/{billingAccountsId}/logs/{logsId}", - // "httpMethod": "DELETE", - // "id": "logging.billingAccounts.logs.delete", - // "parameterOrder": [ - // "logName" - // ], - // "parameters": { - // "logName": { - // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", - // "location": "path", - // "pattern": "^billingAccounts/[^/]+/logs/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v2beta1/{+logName}", - // "response": { - // "$ref": "Empty" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/logging.admin" - // ] - // } - -} - -// method id "logging.billingAccounts.logs.list": - -type BillingAccountsLogsListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists the logs in projects, organizations, folders, or billing -// accounts. Only logs that have entries are listed. -func (r *BillingAccountsLogsService) List(parent string) *BillingAccountsLogsListCall { - c := &BillingAccountsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// PageSize sets the optional parameter "pageSize": The maximum number -// of results to return from this request. Non-positive values are -// ignored. The presence of nextPageToken in the response indicates that -// more results might be available. -func (c *BillingAccountsLogsListCall) PageSize(pageSize int64) *BillingAccountsLogsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If present, then -// retrieve the next batch of results from the preceding call to this -// method. pageToken must be the value of nextPageToken from the -// previous response. The values of other method parameters should be -// identical to those in the previous call. -func (c *BillingAccountsLogsListCall) PageToken(pageToken string) *BillingAccountsLogsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *BillingAccountsLogsListCall) Fields(s ...googleapi.Field) *BillingAccountsLogsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BillingAccountsLogsListCall) IfNoneMatch(entityTag string) *BillingAccountsLogsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BillingAccountsLogsListCall) Context(ctx context.Context) *BillingAccountsLogsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BillingAccountsLogsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BillingAccountsLogsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+parent}/logs") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.billingAccounts.logs.list" call. -// Exactly one of *ListLogsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListLogsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BillingAccountsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListLogsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", - // "flatPath": "v2beta1/billingAccounts/{billingAccountsId}/logs", - // "httpMethod": "GET", - // "id": "logging.billingAccounts.logs.list", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "pageSize": { - // "description": "Optional. 
The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", - // "location": "path", - // "pattern": "^billingAccounts/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v2beta1/{+parent}/logs", - // "response": { - // "$ref": "ListLogsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *BillingAccountsLogsListCall) Pages(ctx context.Context, f func(*ListLogsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "logging.entries.list": - -type EntriesListCall struct { - s *Service - listlogentriesrequest *ListLogEntriesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// List: Lists log entries. Use this method to retrieve log entries from -// Stackdriver Logging. For ways to export log entries, see Exporting -// Logs. -func (r *EntriesService) List(listlogentriesrequest *ListLogEntriesRequest) *EntriesListCall { - c := &EntriesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.listlogentriesrequest = listlogentriesrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *EntriesListCall) Fields(s ...googleapi.Field) *EntriesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *EntriesListCall) Context(ctx context.Context) *EntriesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *EntriesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *EntriesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.listlogentriesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/entries:list") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.entries.list" call. -// Exactly one of *ListLogEntriesResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListLogEntriesResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *EntriesListCall) Do(opts ...googleapi.CallOption) (*ListLogEntriesResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListLogEntriesResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists log entries. Use this method to retrieve log entries from Stackdriver Logging. For ways to export log entries, see Exporting Logs.", - // "flatPath": "v2beta1/entries:list", - // "httpMethod": "POST", - // "id": "logging.entries.list", - // "parameterOrder": [], - // "parameters": {}, - // "path": "v2beta1/entries:list", - // "request": { - // "$ref": "ListLogEntriesRequest" - // }, - // "response": { - // "$ref": "ListLogEntriesResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *EntriesListCall) Pages(ctx context.Context, f func(*ListLogEntriesResponse) error) error { - c.ctx_ = ctx - defer func(pt string) { c.listlogentriesrequest.PageToken = pt }(c.listlogentriesrequest.PageToken) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.listlogentriesrequest.PageToken = x.NextPageToken - } -} - -// method id "logging.entries.write": - -type EntriesWriteCall struct { - s *Service - writelogentriesrequest *WriteLogEntriesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Write: Writes log entries to Stackdriver Logging. -func (r *EntriesService) Write(writelogentriesrequest *WriteLogEntriesRequest) *EntriesWriteCall { - c := &EntriesWriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.writelogentriesrequest = writelogentriesrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *EntriesWriteCall) Fields(s ...googleapi.Field) *EntriesWriteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *EntriesWriteCall) Context(ctx context.Context) *EntriesWriteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *EntriesWriteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *EntriesWriteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.writelogentriesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/entries:write") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.entries.write" call. -// Exactly one of *WriteLogEntriesResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *WriteLogEntriesResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *EntriesWriteCall) Do(opts ...googleapi.CallOption) (*WriteLogEntriesResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &WriteLogEntriesResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Writes log entries to Stackdriver Logging.", - // "flatPath": "v2beta1/entries:write", - // "httpMethod": "POST", - // "id": "logging.entries.write", - // "parameterOrder": [], - // "parameters": {}, - // "path": "v2beta1/entries:write", - // "request": { - // "$ref": "WriteLogEntriesRequest" - // }, - // "response": { - // "$ref": "WriteLogEntriesResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.write" - // ] - // } - -} - -// method id "logging.monitoredResourceDescriptors.list": - -type MonitoredResourceDescriptorsListCall struct { - s *Service - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists the descriptors for monitored resource types used by -// Stackdriver Logging. -func (r *MonitoredResourceDescriptorsService) List() *MonitoredResourceDescriptorsListCall { - c := &MonitoredResourceDescriptorsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - return c -} - -// PageSize sets the optional parameter "pageSize": The maximum number -// of results to return from this request. Non-positive values are -// ignored. The presence of nextPageToken in the response indicates that -// more results might be available. -func (c *MonitoredResourceDescriptorsListCall) PageSize(pageSize int64) *MonitoredResourceDescriptorsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If present, then -// retrieve the next batch of results from the preceding call to this -// method. pageToken must be the value of nextPageToken from the -// previous response. The values of other method parameters should be -// identical to those in the previous call. -func (c *MonitoredResourceDescriptorsListCall) PageToken(pageToken string) *MonitoredResourceDescriptorsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *MonitoredResourceDescriptorsListCall) Fields(s ...googleapi.Field) *MonitoredResourceDescriptorsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *MonitoredResourceDescriptorsListCall) IfNoneMatch(entityTag string) *MonitoredResourceDescriptorsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *MonitoredResourceDescriptorsListCall) Context(ctx context.Context) *MonitoredResourceDescriptorsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *MonitoredResourceDescriptorsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *MonitoredResourceDescriptorsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/monitoredResourceDescriptors") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.monitoredResourceDescriptors.list" call. -// Exactly one of *ListMonitoredResourceDescriptorsResponse or error -// will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *ListMonitoredResourceDescriptorsResponse.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *MonitoredResourceDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListMonitoredResourceDescriptorsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists the descriptors for monitored resource types used by Stackdriver Logging.", - // "flatPath": "v2beta1/monitoredResourceDescriptors", - // "httpMethod": "GET", - // "id": "logging.monitoredResourceDescriptors.list", - // "parameterOrder": [], - // "parameters": { - // "pageSize": { - // "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. 
The values of other method parameters should be identical to those in the previous call.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "v2beta1/monitoredResourceDescriptors", - // "response": { - // "$ref": "ListMonitoredResourceDescriptorsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *MonitoredResourceDescriptorsListCall) Pages(ctx context.Context, f func(*ListMonitoredResourceDescriptorsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "logging.organizations.logs.delete": - -type OrganizationsLogsDeleteCall struct { - s *Service - logName string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes all the log entries in a log. The log reappears if it -// receives new entries. Log entries written shortly before the delete -// operation might not be deleted. -func (r *OrganizationsLogsService) Delete(logName string) *OrganizationsLogsDeleteCall { - c := &OrganizationsLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.logName = logName - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *OrganizationsLogsDeleteCall) Fields(s ...googleapi.Field) *OrganizationsLogsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *OrganizationsLogsDeleteCall) Context(ctx context.Context) *OrganizationsLogsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *OrganizationsLogsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *OrganizationsLogsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+logName}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "logName": c.logName, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.organizations.logs.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *OrganizationsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Empty{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.", - // "flatPath": "v2beta1/organizations/{organizationsId}/logs/{logsId}", - // "httpMethod": "DELETE", - // "id": "logging.organizations.logs.delete", - // "parameterOrder": [ - // "logName" - // ], - // "parameters": { - // "logName": { - // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", - // "location": "path", - // "pattern": "^organizations/[^/]+/logs/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v2beta1/{+logName}", - // "response": { - // "$ref": "Empty" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/logging.admin" - // ] - // } - -} - -// method id "logging.organizations.logs.list": - -type OrganizationsLogsListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists the logs in projects, organizations, folders, or billing -// accounts. Only logs that have entries are listed. -func (r *OrganizationsLogsService) List(parent string) *OrganizationsLogsListCall { - c := &OrganizationsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// PageSize sets the optional parameter "pageSize": The maximum number -// of results to return from this request. Non-positive values are -// ignored. The presence of nextPageToken in the response indicates that -// more results might be available. -func (c *OrganizationsLogsListCall) PageSize(pageSize int64) *OrganizationsLogsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If present, then -// retrieve the next batch of results from the preceding call to this -// method. pageToken must be the value of nextPageToken from the -// previous response. The values of other method parameters should be -// identical to those in the previous call. 
-func (c *OrganizationsLogsListCall) PageToken(pageToken string) *OrganizationsLogsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *OrganizationsLogsListCall) Fields(s ...googleapi.Field) *OrganizationsLogsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *OrganizationsLogsListCall) IfNoneMatch(entityTag string) *OrganizationsLogsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *OrganizationsLogsListCall) Context(ctx context.Context) *OrganizationsLogsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *OrganizationsLogsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *OrganizationsLogsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+parent}/logs") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.organizations.logs.list" call. -// Exactly one of *ListLogsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListLogsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *OrganizationsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListLogsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists the logs in projects, organizations, folders, or billing accounts. 
Only logs that have entries are listed.", - // "flatPath": "v2beta1/organizations/{organizationsId}/logs", - // "httpMethod": "GET", - // "id": "logging.organizations.logs.list", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "pageSize": { - // "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", - // "location": "path", - // "pattern": "^organizations/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v2beta1/{+parent}/logs", - // "response": { - // "$ref": "ListLogsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *OrganizationsLogsListCall) Pages(ctx context.Context, f func(*ListLogsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "logging.projects.logs.delete": - -type ProjectsLogsDeleteCall struct { - s *Service - logName string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes all the log entries in a log. The log reappears if it -// receives new entries. Log entries written shortly before the delete -// operation might not be deleted. -func (r *ProjectsLogsService) Delete(logName string) *ProjectsLogsDeleteCall { - c := &ProjectsLogsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.logName = logName - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLogsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLogsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLogsDeleteCall) Context(ctx context.Context) *ProjectsLogsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ProjectsLogsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLogsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+logName}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "logName": c.logName, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.projects.logs.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsLogsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Empty{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes all the log entries in a log. The log reappears if it receives new entries. Log entries written shortly before the delete operation might not be deleted.", - // "flatPath": "v2beta1/projects/{projectsId}/logs/{logsId}", - // "httpMethod": "DELETE", - // "id": "logging.projects.logs.delete", - // "parameterOrder": [ - // "logName" - // ], - // "parameters": { - // "logName": { - // "description": "Required. The resource name of the log to delete:\n\"projects/[PROJECT_ID]/logs/[LOG_ID]\"\n\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"\n\"folders/[FOLDER_ID]/logs/[LOG_ID]\"\n[LOG_ID] must be URL-encoded. For example, \"projects/my-project-id/logs/syslog\", \"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\". For more information about log names, see LogEntry.", - // "location": "path", - // "pattern": "^projects/[^/]+/logs/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v2beta1/{+logName}", - // "response": { - // "$ref": "Empty" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/logging.admin" - // ] - // } - -} - -// method id "logging.projects.logs.list": - -type ProjectsLogsListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists the logs in projects, organizations, folders, or billing -// accounts. Only logs that have entries are listed. 
-func (r *ProjectsLogsService) List(parent string) *ProjectsLogsListCall { - c := &ProjectsLogsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// PageSize sets the optional parameter "pageSize": The maximum number -// of results to return from this request. Non-positive values are -// ignored. The presence of nextPageToken in the response indicates that -// more results might be available. -func (c *ProjectsLogsListCall) PageSize(pageSize int64) *ProjectsLogsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If present, then -// retrieve the next batch of results from the preceding call to this -// method. pageToken must be the value of nextPageToken from the -// previous response. The values of other method parameters should be -// identical to those in the previous call. -func (c *ProjectsLogsListCall) PageToken(pageToken string) *ProjectsLogsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLogsListCall) Fields(s ...googleapi.Field) *ProjectsLogsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLogsListCall) IfNoneMatch(entityTag string) *ProjectsLogsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLogsListCall) Context(ctx context.Context) *ProjectsLogsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLogsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLogsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+parent}/logs") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.projects.logs.list" call. -// Exactly one of *ListLogsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListLogsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *ProjectsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListLogsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists the logs in projects, organizations, folders, or billing accounts. Only logs that have entries are listed.", - // "flatPath": "v2beta1/projects/{projectsId}/logs", - // "httpMethod": "GET", - // "id": "logging.projects.logs.list", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "pageSize": { - // "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. The resource name that owns the logs:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", - // "location": "path", - // "pattern": "^projects/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v2beta1/{+parent}/logs", - // "response": { - // "$ref": "ListLogsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsLogsListCall) Pages(ctx context.Context, f func(*ListLogsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "logging.projects.metrics.create": - -type ProjectsMetricsCreateCall struct { - s *Service - parent string - logmetric *LogMetric - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Creates a logs-based metric. 
-func (r *ProjectsMetricsService) Create(parent string, logmetric *LogMetric) *ProjectsMetricsCreateCall { - c := &ProjectsMetricsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.logmetric = logmetric - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsMetricsCreateCall) Fields(s ...googleapi.Field) *ProjectsMetricsCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsMetricsCreateCall) Context(ctx context.Context) *ProjectsMetricsCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsMetricsCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsMetricsCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logmetric) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+parent}/metrics") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.projects.metrics.create" call. -// Exactly one of *LogMetric or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *LogMetric.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsMetricsCreateCall) Do(opts ...googleapi.CallOption) (*LogMetric, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &LogMetric{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a logs-based metric.", - // "flatPath": "v2beta1/projects/{projectsId}/metrics", - // "httpMethod": "POST", - // "id": "logging.projects.metrics.create", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "parent": { - // "description": "The resource name of the project in which to create the metric:\n\"projects/[PROJECT_ID]\"\nThe new metric must be provided in the request.", - // "location": "path", - // "pattern": "^projects/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v2beta1/{+parent}/metrics", - // "request": { - // "$ref": "LogMetric" - // }, - // "response": { - // "$ref": "LogMetric" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.write" - // ] - // } - -} - -// method id "logging.projects.metrics.delete": - -type ProjectsMetricsDeleteCall struct { - s *Service - metricName string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes a logs-based metric. -func (r *ProjectsMetricsService) Delete(metricName string) *ProjectsMetricsDeleteCall { - c := &ProjectsMetricsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.metricName = metricName - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsMetricsDeleteCall) Fields(s ...googleapi.Field) *ProjectsMetricsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsMetricsDeleteCall) Context(ctx context.Context) *ProjectsMetricsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsMetricsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsMetricsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+metricName}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "metricName": c.metricName, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.projects.metrics.delete" call. 
-// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsMetricsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Empty{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes a logs-based metric.", - // "flatPath": "v2beta1/projects/{projectsId}/metrics/{metricsId}", - // "httpMethod": "DELETE", - // "id": "logging.projects.metrics.delete", - // "parameterOrder": [ - // "metricName" - // ], - // "parameters": { - // "metricName": { - // "description": "The resource name of the metric to delete:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", - // "location": "path", - // "pattern": "^projects/[^/]+/metrics/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v2beta1/{+metricName}", - // "response": { - // "$ref": "Empty" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.write" - // ] - // } - -} - -// method id "logging.projects.metrics.get": - -type ProjectsMetricsGetCall struct { - s *Service - metricName string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Gets a logs-based metric. -func (r *ProjectsMetricsService) Get(metricName string) *ProjectsMetricsGetCall { - c := &ProjectsMetricsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.metricName = metricName - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsMetricsGetCall) Fields(s ...googleapi.Field) *ProjectsMetricsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsMetricsGetCall) IfNoneMatch(entityTag string) *ProjectsMetricsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *ProjectsMetricsGetCall) Context(ctx context.Context) *ProjectsMetricsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsMetricsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsMetricsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+metricName}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "metricName": c.metricName, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.projects.metrics.get" call. -// Exactly one of *LogMetric or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *LogMetric.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsMetricsGetCall) Do(opts ...googleapi.CallOption) (*LogMetric, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &LogMetric{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets a logs-based metric.", - // "flatPath": "v2beta1/projects/{projectsId}/metrics/{metricsId}", - // "httpMethod": "GET", - // "id": "logging.projects.metrics.get", - // "parameterOrder": [ - // "metricName" - // ], - // "parameters": { - // "metricName": { - // "description": "The resource name of the desired metric:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\n", - // "location": "path", - // "pattern": "^projects/[^/]+/metrics/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v2beta1/{+metricName}", - // "response": { - // "$ref": "LogMetric" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" - // ] - // } - -} - -// method id "logging.projects.metrics.list": - -type ProjectsMetricsListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists logs-based metrics. 
-func (r *ProjectsMetricsService) List(parent string) *ProjectsMetricsListCall { - c := &ProjectsMetricsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// PageSize sets the optional parameter "pageSize": The maximum number -// of results to return from this request. Non-positive values are -// ignored. The presence of nextPageToken in the response indicates that -// more results might be available. -func (c *ProjectsMetricsListCall) PageSize(pageSize int64) *ProjectsMetricsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If present, then -// retrieve the next batch of results from the preceding call to this -// method. pageToken must be the value of nextPageToken from the -// previous response. The values of other method parameters should be -// identical to those in the previous call. -func (c *ProjectsMetricsListCall) PageToken(pageToken string) *ProjectsMetricsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsMetricsListCall) Fields(s ...googleapi.Field) *ProjectsMetricsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsMetricsListCall) IfNoneMatch(entityTag string) *ProjectsMetricsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsMetricsListCall) Context(ctx context.Context) *ProjectsMetricsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsMetricsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsMetricsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+parent}/metrics") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.projects.metrics.list" call. -// Exactly one of *ListLogMetricsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListLogMetricsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *ProjectsMetricsListCall) Do(opts ...googleapi.CallOption) (*ListLogMetricsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListLogMetricsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists logs-based metrics.", - // "flatPath": "v2beta1/projects/{projectsId}/metrics", - // "httpMethod": "GET", - // "id": "logging.projects.metrics.list", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "pageSize": { - // "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. The name of the project containing the metrics:\n\"projects/[PROJECT_ID]\"\n", - // "location": "path", - // "pattern": "^projects/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v2beta1/{+parent}/metrics", - // "response": { - // "$ref": "ListLogMetricsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsMetricsListCall) Pages(ctx context.Context, f func(*ListLogMetricsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "logging.projects.metrics.update": - -type ProjectsMetricsUpdateCall struct { - s *Service - metricName string - logmetric *LogMetric - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Creates or updates a logs-based metric. -func (r *ProjectsMetricsService) Update(metricName string, logmetric *LogMetric) *ProjectsMetricsUpdateCall { - c := &ProjectsMetricsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.metricName = metricName - c.logmetric = logmetric - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsMetricsUpdateCall) Fields(s ...googleapi.Field) *ProjectsMetricsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsMetricsUpdateCall) Context(ctx context.Context) *ProjectsMetricsUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsMetricsUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsMetricsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logmetric) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+metricName}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "metricName": c.metricName, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.projects.metrics.update" call. -// Exactly one of *LogMetric or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *LogMetric.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsMetricsUpdateCall) Do(opts ...googleapi.CallOption) (*LogMetric, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &LogMetric{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates or updates a logs-based metric.", - // "flatPath": "v2beta1/projects/{projectsId}/metrics/{metricsId}", - // "httpMethod": "PUT", - // "id": "logging.projects.metrics.update", - // "parameterOrder": [ - // "metricName" - // ], - // "parameters": { - // "metricName": { - // "description": "The resource name of the metric to update:\n\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"\nThe updated metric must be provided in the request and it's name field must be the same as [METRIC_ID] If the metric does not exist in [PROJECT_ID], then a new metric is created.", - // "location": "path", - // "pattern": "^projects/[^/]+/metrics/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v2beta1/{+metricName}", - // "request": { - // "$ref": "LogMetric" - // }, - // "response": { - // "$ref": "LogMetric" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.write" - // ] - // } - -} - -// method id "logging.projects.sinks.create": - -type ProjectsSinksCreateCall struct { - s *Service - parent string - logsink *LogSink - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Creates a sink that exports specified log entries to a -// destination. The export of newly-ingested log entries begins -// immediately, unless the current time is outside the sink's start and -// end times or the sink's writer_identity is not permitted to write to -// the destination. A sink can export log entries only from the resource -// owning the sink. -func (r *ProjectsSinksService) Create(parent string, logsink *LogSink) *ProjectsSinksCreateCall { - c := &ProjectsSinksCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.logsink = logsink - return c -} - -// UniqueWriterIdentity sets the optional parameter -// "uniqueWriterIdentity": Determines the kind of IAM identity returned -// as writer_identity in the new sink. If this value is omitted or set -// to false, and if the sink's parent is a project, then the value -// returned as writer_identity is the same group or service account used -// by Stackdriver Logging before the addition of writer identities to -// this API. The sink's destination must be in the same project as the -// sink itself.If this field is set to true, or if the sink is owned by -// a non-project resource such as an organization, then the value of -// writer_identity will be a unique service account used only for -// exports from the new sink. For more information, see writer_identity -// in LogSink. 
-func (c *ProjectsSinksCreateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *ProjectsSinksCreateCall { - c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSinksCreateCall) Fields(s ...googleapi.Field) *ProjectsSinksCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSinksCreateCall) Context(ctx context.Context) *ProjectsSinksCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsSinksCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSinksCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+parent}/sinks") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.projects.sinks.create" call. -// Exactly one of *LogSink or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *LogSink.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &LogSink{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a sink that exports specified log entries to a destination. The export of newly-ingested log entries begins immediately, unless the current time is outside the sink's start and end times or the sink's writer_identity is not permitted to write to the destination. 
A sink can export log entries only from the resource owning the sink.", - // "flatPath": "v2beta1/projects/{projectsId}/sinks", - // "httpMethod": "POST", - // "id": "logging.projects.sinks.create", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "parent": { - // "description": "Required. The resource in which to create the sink:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\nExamples: \"projects/my-logging-project\", \"organizations/123456789\".", - // "location": "path", - // "pattern": "^projects/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "uniqueWriterIdentity": { - // "description": "Optional. Determines the kind of IAM identity returned as writer_identity in the new sink. If this value is omitted or set to false, and if the sink's parent is a project, then the value returned as writer_identity is the same group or service account used by Stackdriver Logging before the addition of writer identities to this API. The sink's destination must be in the same project as the sink itself.If this field is set to true, or if the sink is owned by a non-project resource such as an organization, then the value of writer_identity will be a unique service account used only for exports from the new sink. For more information, see writer_identity in LogSink.", - // "location": "query", - // "type": "boolean" - // } - // }, - // "path": "v2beta1/{+parent}/sinks", - // "request": { - // "$ref": "LogSink" - // }, - // "response": { - // "$ref": "LogSink" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/logging.admin" - // ] - // } - -} - -// method id "logging.projects.sinks.delete": - -type ProjectsSinksDeleteCall struct { - s *Service - sinkNameid string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes a sink. If the sink has a unique writer_identity, -// then that service account is also deleted. -func (r *ProjectsSinksService) Delete(sinkNameid string) *ProjectsSinksDeleteCall { - c := &ProjectsSinksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sinkNameid = sinkNameid - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSinksDeleteCall) Fields(s ...googleapi.Field) *ProjectsSinksDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSinksDeleteCall) Context(ctx context.Context) *ProjectsSinksDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsSinksDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSinksDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+sinkName}") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "sinkName": c.sinkNameid, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.projects.sinks.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsSinksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Empty{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes a sink. If the sink has a unique writer_identity, then that service account is also deleted.", - // "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}", - // "httpMethod": "DELETE", - // "id": "logging.projects.sinks.delete", - // "parameterOrder": [ - // "sinkName" - // ], - // "parameters": { - // "sinkName": { - // "description": "Required. The full resource name of the sink to delete, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", - // "location": "path", - // "pattern": "^projects/[^/]+/sinks/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v2beta1/{+sinkName}", - // "response": { - // "$ref": "Empty" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/logging.admin" - // ] - // } - -} - -// method id "logging.projects.sinks.get": - -type ProjectsSinksGetCall struct { - s *Service - sinkName string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Gets a sink. -func (r *ProjectsSinksService) Get(sinkName string) *ProjectsSinksGetCall { - c := &ProjectsSinksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sinkName = sinkName - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSinksGetCall) Fields(s ...googleapi.Field) *ProjectsSinksGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsSinksGetCall) IfNoneMatch(entityTag string) *ProjectsSinksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSinksGetCall) Context(ctx context.Context) *ProjectsSinksGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsSinksGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSinksGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+sinkName}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "sinkName": c.sinkName, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.projects.sinks.get" call. -// Exactly one of *LogSink or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *LogSink.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsSinksGetCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &LogSink{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets a sink.", - // "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}", - // "httpMethod": "GET", - // "id": "logging.projects.sinks.get", - // "parameterOrder": [ - // "sinkName" - // ], - // "parameters": { - // "sinkName": { - // "description": "Required. 
The resource name of the sink:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", - // "location": "path", - // "pattern": "^projects/[^/]+/sinks/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v2beta1/{+sinkName}", - // "response": { - // "$ref": "LogSink" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" - // ] - // } - -} - -// method id "logging.projects.sinks.list": - -type ProjectsSinksListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists sinks. -func (r *ProjectsSinksService) List(parent string) *ProjectsSinksListCall { - c := &ProjectsSinksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// PageSize sets the optional parameter "pageSize": The maximum number -// of results to return from this request. Non-positive values are -// ignored. The presence of nextPageToken in the response indicates that -// more results might be available. -func (c *ProjectsSinksListCall) PageSize(pageSize int64) *ProjectsSinksListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If present, then -// retrieve the next batch of results from the preceding call to this -// method. pageToken must be the value of nextPageToken from the -// previous response. The values of other method parameters should be -// identical to those in the previous call. -func (c *ProjectsSinksListCall) PageToken(pageToken string) *ProjectsSinksListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSinksListCall) Fields(s ...googleapi.Field) *ProjectsSinksListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsSinksListCall) IfNoneMatch(entityTag string) *ProjectsSinksListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSinksListCall) Context(ctx context.Context) *ProjectsSinksListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ProjectsSinksListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSinksListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+parent}/sinks") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.projects.sinks.list" call. -// Exactly one of *ListSinksResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListSinksResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsSinksListCall) Do(opts ...googleapi.CallOption) (*ListSinksResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListSinksResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists sinks.", - // "flatPath": "v2beta1/projects/{projectsId}/sinks", - // "httpMethod": "GET", - // "id": "logging.projects.sinks.list", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "pageSize": { - // "description": "Optional. The maximum number of results to return from this request. Non-positive values are ignored. The presence of nextPageToken in the response indicates that more results might be available.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. If present, then retrieve the next batch of results from the preceding call to this method. pageToken must be the value of nextPageToken from the previous response. The values of other method parameters should be identical to those in the previous call.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. 
The parent resource whose sinks are to be listed:\n\"projects/[PROJECT_ID]\"\n\"organizations/[ORGANIZATION_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]\"\n\"folders/[FOLDER_ID]\"\n", - // "location": "path", - // "pattern": "^projects/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v2beta1/{+parent}/sinks", - // "response": { - // "$ref": "ListSinksResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/logging.admin", - // "https://www.googleapis.com/auth/logging.read" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsSinksListCall) Pages(ctx context.Context, f func(*ListSinksResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "logging.projects.sinks.update": - -type ProjectsSinksUpdateCall struct { - s *Service - sinkNameid string - logsink *LogSink - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates a sink. If the named sink doesn't exist, then this -// method is identical to sinks.create. If the named sink does exist, -// then this method replaces the following fields in the existing sink -// with values from the new sink: destination, filter, -// output_version_format, start_time, and end_time. The updated filter -// might also have a new writer_identity; see the unique_writer_identity -// field. -func (r *ProjectsSinksService) Update(sinkNameid string, logsink *LogSink) *ProjectsSinksUpdateCall { - c := &ProjectsSinksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sinkNameid = sinkNameid - c.logsink = logsink - return c -} - -// UniqueWriterIdentity sets the optional parameter -// "uniqueWriterIdentity": See sinks.create for a description of this -// field. When updating a sink, the effect of this field on the value of -// writer_identity in the updated sink depends on both the old and new -// values of this field: -// If the old and new values of this field are both false or both true, -// then there is no change to the sink's writer_identity. -// If the old value is false and the new value is true, then -// writer_identity is changed to a unique service account. -// It is an error if the old value is true and the new value is false. -func (c *ProjectsSinksUpdateCall) UniqueWriterIdentity(uniqueWriterIdentity bool) *ProjectsSinksUpdateCall { - c.urlParams_.Set("uniqueWriterIdentity", fmt.Sprint(uniqueWriterIdentity)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSinksUpdateCall) Fields(s ...googleapi.Field) *ProjectsSinksUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *ProjectsSinksUpdateCall) Context(ctx context.Context) *ProjectsSinksUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsSinksUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSinksUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.logsink) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v2beta1/{+sinkName}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "sinkName": c.sinkNameid, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "logging.projects.sinks.update" call. -// Exactly one of *LogSink or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *LogSink.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &LogSink{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a sink. If the named sink doesn't exist, then this method is identical to sinks.create. If the named sink does exist, then this method replaces the following fields in the existing sink with values from the new sink: destination, filter, output_version_format, start_time, and end_time. The updated filter might also have a new writer_identity; see the unique_writer_identity field.", - // "flatPath": "v2beta1/projects/{projectsId}/sinks/{sinksId}", - // "httpMethod": "PUT", - // "id": "logging.projects.sinks.update", - // "parameterOrder": [ - // "sinkName" - // ], - // "parameters": { - // "sinkName": { - // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier:\n\"projects/[PROJECT_ID]/sinks/[SINK_ID]\"\n\"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\"\n\"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\"\n\"folders/[FOLDER_ID]/sinks/[SINK_ID]\"\nExample: \"projects/my-project-id/sinks/my-sink-id\".", - // "location": "path", - // "pattern": "^projects/[^/]+/sinks/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "uniqueWriterIdentity": { - // "description": "Optional. 
See sinks.create for a description of this field. When updating a sink, the effect of this field on the value of writer_identity in the updated sink depends on both the old and new values of this field:\nIf the old and new values of this field are both false or both true, then there is no change to the sink's writer_identity.\nIf the old value is false and the new value is true, then writer_identity is changed to a unique service account.\nIt is an error if the old value is true and the new value is false.", - // "location": "query", - // "type": "boolean" - // } - // }, - // "path": "v2beta1/{+sinkName}", - // "request": { - // "$ref": "LogSink" - // }, - // "response": { - // "$ref": "LogSink" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/logging.admin" - // ] - // } - -} diff --git a/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json b/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json deleted file mode 100644 index a22e70a0655..00000000000 --- a/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json +++ /dev/null @@ -1,1684 +0,0 @@ -{ - "resources": { - "projects": { - "resources": { - "collectdTimeSeries": { - "methods": { - "create": { - "httpMethod": "POST", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Empty" - }, - "parameters": { - "name": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "The project in which to create the time series. The format is \"projects/PROJECT_ID_OR_NUMBER\".", - "required": true, - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.write" - ], - "flatPath": "v3/projects/{projectsId}/collectdTimeSeries", - "id": "monitoring.projects.collectdTimeSeries.create", - "path": "v3/{+name}/collectdTimeSeries", - "request": { - "$ref": "CreateCollectdTimeSeriesRequest" - }, - "description": "Stackdriver Monitoring Agent only: Creates a new time series.\u003caside class=\"caution\"\u003eThis method is only for use by the Stackdriver Monitoring Agent. Use projects.timeSeries.create instead.\u003c/aside\u003e" - } - } - }, - "timeSeries": { - "methods": { - "list": { - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "response": { - "$ref": "ListTimeSeriesResponse" - }, - "parameters": { - "secondaryAggregation.alignmentPeriod": { - "description": "The alignment period for per-time series alignment. If present, alignmentPeriod must be at least 60 seconds. After per-time series alignment, each time series will contain data points only on the period boundaries. If perSeriesAligner is not specified or equals ALIGN_NONE, then this field is ignored. If perSeriesAligner is specified and does not equal ALIGN_NONE, then this field must be defined; otherwise an error is returned.", - "format": "google-duration", - "type": "string", - "location": "query" - }, - "pageSize": { - "description": "A positive number that is the maximum number of results to return. 
When view field sets to FULL, it limits the number of Points server will return; if view field is HEADERS, it limits the number of TimeSeries server will return.", - "format": "int32", - "type": "integer", - "location": "query" - }, - "secondaryAggregation.perSeriesAligner": { - "enum": [ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05" - ], - "description": "The approach to be used to align individual time series. Not all alignment functions may be applied to all time series, depending on the metric type and value type of the original time series. Alignment may change the metric type or the value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.", - "type": "string", - "location": "query" - }, - "orderBy": { - "location": "query", - "description": "Specifies the order in which the points of the time series should be returned. By default, results are not ordered. Currently, this field must be left blank.", - "type": "string" - }, - "aggregation.crossSeriesReducer": { - "location": "query", - "enum": [ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05" - ], - "description": "The approach to be used to combine time series. Not all reducer functions may be applied to all time series, depending on the metric type and the value type of the original time series. Reduction may change the metric type of value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.", - "type": "string" - }, - "filter": { - "description": "A monitoring filter that specifies which time series should be returned. The filter must specify a single metric type, and can additionally specify metric labels and other information. For example:\nmetric.type = \"compute.googleapis.com/instance/cpu/usage_time\" AND\n metric.label.instance_name = \"my-instance-name\"\n", - "type": "string", - "location": "query" - }, - "pageToken": { - "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - "type": "string", - "location": "query" - }, - "aggregation.perSeriesAligner": { - "enum": [ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05" - ], - "description": "The approach to be used to align individual time series. 
Not all alignment functions may be applied to all time series, depending on the metric type and value type of the original time series. Alignment may change the metric type or the value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.", - "type": "string", - "location": "query" - }, - "interval.startTime": { - "location": "query", - "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", - "format": "google-datetime", - "type": "string" - }, - "view": { - "enum": [ - "FULL", - "HEADERS" - ], - "description": "Specifies which information is returned about the time series.", - "type": "string", - "location": "query" - }, - "secondaryAggregation.crossSeriesReducer": { - "location": "query", - "enum": [ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05" - ], - "description": "The approach to be used to combine time series. Not all reducer functions may be applied to all time series, depending on the metric type and the value type of the original time series. Reduction may change the metric type of value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.", - "type": "string" - }, - "secondaryAggregation.groupByFields": { - "description": "The set of fields to preserve when crossSeriesReducer is specified. The groupByFields determine how the time series are partitioned into subsets prior to applying the aggregation function. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The crossSeriesReducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in groupByFields are aggregated away. If groupByFields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If crossSeriesReducer is not defined, this field is ignored.", - "type": "string", - "repeated": true, - "location": "query" - }, - "name": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", - "required": true, - "type": "string" - }, - "aggregation.groupByFields": { - "repeated": true, - "location": "query", - "description": "The set of fields to preserve when crossSeriesReducer is specified. The groupByFields determine how the time series are partitioned into subsets prior to applying the aggregation function. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The crossSeriesReducer is applied to each subset of time series. 
It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in groupByFields are aggregated away. If groupByFields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If crossSeriesReducer is not defined, this field is ignored.", - "type": "string" - }, - "interval.endTime": { - "description": "Required. The end of the time interval.", - "format": "google-datetime", - "type": "string", - "location": "query" - }, - "aggregation.alignmentPeriod": { - "description": "The alignment period for per-time series alignment. If present, alignmentPeriod must be at least 60 seconds. After per-time series alignment, each time series will contain data points only on the period boundaries. If perSeriesAligner is not specified or equals ALIGN_NONE, then this field is ignored. If perSeriesAligner is specified and does not equal ALIGN_NONE, then this field must be defined; otherwise an error is returned.", - "format": "google-duration", - "type": "string", - "location": "query" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read" - ], - "flatPath": "v3/projects/{projectsId}/timeSeries", - "path": "v3/{+name}/timeSeries", - "id": "monitoring.projects.timeSeries.list", - "description": "Lists time series that match a filter. This method does not require a Stackdriver account." - }, - "create": { - "httpMethod": "POST", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Empty" - }, - "parameters": { - "name": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", - "required": true, - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.write" - ], - "flatPath": "v3/projects/{projectsId}/timeSeries", - "id": "monitoring.projects.timeSeries.create", - "path": "v3/{+name}/timeSeries", - "request": { - "$ref": "CreateTimeSeriesRequest" - }, - "description": "Creates or adds data to one or more time series. The response is empty if all time series in the request were written. If any time series could not be written, a corresponding failure message is included in the error response." - } - } - }, - "metricDescriptors": { - "methods": { - "create": { - "response": { - "$ref": "MetricDescriptor" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "parameters": { - "name": { - "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.write" - ], - "flatPath": "v3/projects/{projectsId}/metricDescriptors", - "path": "v3/{+name}/metricDescriptors", - "id": "monitoring.projects.metricDescriptors.create", - "request": { - "$ref": "MetricDescriptor" - }, - "description": "Creates a new metric descriptor. User-created metric descriptors define custom metrics." 
- }, - "delete": { - "httpMethod": "DELETE", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "pattern": "^projects/[^/]+/metricDescriptors/.+$", - "location": "path", - "description": "The metric descriptor on which to execute the request. The format is \"projects/{project_id_or_number}/metricDescriptors/{metric_id}\". An example of {metric_id} is: \"custom.googleapis.com/my_test_metric\".", - "required": true, - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ], - "flatPath": "v3/projects/{projectsId}/metricDescriptors/{metricDescriptorsId}", - "id": "monitoring.projects.metricDescriptors.delete", - "path": "v3/{+name}", - "description": "Deletes a metric descriptor. Only user-created custom metrics can be deleted." - }, - "list": { - "response": { - "$ref": "ListMetricDescriptorsResponse" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "parameters": { - "filter": { - "location": "query", - "description": "If this field is empty, all custom and system-defined metric descriptors are returned. Otherwise, the filter specifies which metric descriptors are to be returned. For example, the following filter matches all custom metrics:\nmetric.type = starts_with(\"custom.googleapis.com/\")\n", - "type": "string" - }, - "name": { - "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" - }, - "pageToken": { - "location": "query", - "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - "type": "string" - }, - "pageSize": { - "location": "query", - "description": "A positive number that is the maximum number of results to return.", - "format": "int32", - "type": "integer" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read", - "https://www.googleapis.com/auth/monitoring.write" - ], - "flatPath": "v3/projects/{projectsId}/metricDescriptors", - "path": "v3/{+name}/metricDescriptors", - "id": "monitoring.projects.metricDescriptors.list", - "description": "Lists metric descriptors that match a filter. This method does not require a Stackdriver account." - }, - "get": { - "path": "v3/{+name}", - "id": "monitoring.projects.metricDescriptors.get", - "description": "Gets a single metric descriptor. This method does not require a Stackdriver account.", - "response": { - "$ref": "MetricDescriptor" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "parameters": { - "name": { - "pattern": "^projects/[^/]+/metricDescriptors/.+$", - "location": "path", - "description": "The metric descriptor on which to execute the request. The format is \"projects/{project_id_or_number}/metricDescriptors/{metric_id}\". 
An example value of {metric_id} is \"compute.googleapis.com/instance/disk/read_bytes_count\".", - "required": true, - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read", - "https://www.googleapis.com/auth/monitoring.write" - ], - "flatPath": "v3/projects/{projectsId}/metricDescriptors/{metricDescriptorsId}" - } - } - }, - "monitoredResourceDescriptors": { - "methods": { - "list": { - "description": "Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account.", - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "response": { - "$ref": "ListMonitoredResourceDescriptorsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read", - "https://www.googleapis.com/auth/monitoring.write" - ], - "parameters": { - "filter": { - "description": "An optional filter describing the descriptors to be returned. The filter can reference the descriptor's type and labels. For example, the following filter returns only Google Compute Engine descriptors that have an id label:\nresource.type = starts_with(\"gce_\") AND resource.label:id\n", - "type": "string", - "location": "query" - }, - "name": { - "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" - }, - "pageToken": { - "location": "query", - "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - "type": "string" - }, - "pageSize": { - "location": "query", - "description": "A positive number that is the maximum number of results to return.", - "format": "int32", - "type": "integer" - } - }, - "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors", - "path": "v3/{+name}/monitoredResourceDescriptors", - "id": "monitoring.projects.monitoredResourceDescriptors.list" - }, - "get": { - "description": "Gets a single monitored resource descriptor. This method does not require a Stackdriver account.", - "httpMethod": "GET", - "response": { - "$ref": "MonitoredResourceDescriptor" - }, - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "pattern": "^projects/[^/]+/monitoredResourceDescriptors/[^/]+$", - "location": "path", - "description": "The monitored resource descriptor to get. The format is \"projects/{project_id_or_number}/monitoredResourceDescriptors/{resource_type}\". 
The {resource_type} is a predefined type, such as cloudsql_database.", - "required": true, - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read", - "https://www.googleapis.com/auth/monitoring.write" - ], - "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors/{monitoredResourceDescriptorsId}", - "id": "monitoring.projects.monitoredResourceDescriptors.get", - "path": "v3/{+name}" - } - } - }, - "groups": { - "methods": { - "delete": { - "path": "v3/{+name}", - "id": "monitoring.projects.groups.delete", - "description": "Deletes an existing group.", - "parameterOrder": [ - "name" - ], - "httpMethod": "DELETE", - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ], - "parameters": { - "name": { - "pattern": "^projects/[^/]+/groups/[^/]+$", - "location": "path", - "description": "The group to delete. The format is \"projects/{project_id_or_number}/groups/{group_id}\".", - "required": true, - "type": "string" - } - }, - "flatPath": "v3/projects/{projectsId}/groups/{groupsId}" - }, - "list": { - "description": "Lists the existing groups.", - "httpMethod": "GET", - "response": { - "$ref": "ListGroupsResponse" - }, - "parameterOrder": [ - "name" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read" - ], - "parameters": { - "descendantsOfGroup": { - "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns the descendants of the specified group. This is a superset of the results returned by the childrenOfGroup filter, and includes children-of-children, and so forth.", - "type": "string", - "location": "query" - }, - "pageToken": { - "location": "query", - "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - "type": "string" - }, - "pageSize": { - "location": "query", - "description": "A positive number that is the maximum number of results to return.", - "format": "int32", - "type": "integer" - }, - "ancestorsOfGroup": { - "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns groups that are ancestors of the specified group. The groups are returned in order, starting with the immediate parent and ending with the most distant ancestor. If the specified group has no immediate parent, the results are empty.", - "type": "string", - "location": "query" - }, - "name": { - "description": "The project whose groups are to be listed. The format is \"projects/{project_id_or_number}\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" - }, - "childrenOfGroup": { - "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns groups whose parentName field contains the group name. 
If no groups have this parent, the results are empty.", - "type": "string", - "location": "query" - } - }, - "flatPath": "v3/projects/{projectsId}/groups", - "id": "monitoring.projects.groups.list", - "path": "v3/{+name}/groups" - }, - "get": { - "httpMethod": "GET", - "response": { - "$ref": "Group" - }, - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "The group to retrieve. The format is \"projects/{project_id_or_number}/groups/{group_id}\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/groups/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read" - ], - "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", - "id": "monitoring.projects.groups.get", - "path": "v3/{+name}", - "description": "Gets a single group." - }, - "update": { - "response": { - "$ref": "Group" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "PUT", - "parameters": { - "name": { - "description": "Output only. The name of this group. The format is \"projects/{project_id_or_number}/groups/{group_id}\". When creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique {group_id} that is generated automatically.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/groups/[^/]+$", - "location": "path" - }, - "validateOnly": { - "location": "query", - "description": "If true, validate this request but do not update the existing group.", - "type": "boolean" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ], - "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", - "path": "v3/{+name}", - "id": "monitoring.projects.groups.update", - "request": { - "$ref": "Group" - }, - "description": "Updates an existing group. You can change any group attributes except name." - }, - "create": { - "response": { - "$ref": "Group" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "parameters": { - "name": { - "description": "The project in which to create the group. The format is \"projects/{project_id_or_number}\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" - }, - "validateOnly": { - "location": "query", - "description": "If true, validate this request but do not create the group.", - "type": "boolean" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring" - ], - "flatPath": "v3/projects/{projectsId}/groups", - "path": "v3/{+name}/groups", - "id": "monitoring.projects.groups.create", - "request": { - "$ref": "Group" - }, - "description": "Creates a new group." - } - }, - "resources": { - "members": { - "methods": { - "list": { - "id": "monitoring.projects.groups.members.list", - "path": "v3/{+name}/members", - "description": "Lists the monitored resources that are members of a group.", - "httpMethod": "GET", - "response": { - "$ref": "ListGroupMembersResponse" - }, - "parameterOrder": [ - "name" - ], - "parameters": { - "pageToken": { - "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. 
Using this field causes the method to return additional results from the previous method call.", - "type": "string", - "location": "query" - }, - "interval.startTime": { - "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", - "format": "google-datetime", - "type": "string", - "location": "query" - }, - "pageSize": { - "description": "A positive number that is the maximum number of results to return.", - "format": "int32", - "type": "integer", - "location": "query" - }, - "name": { - "description": "The group whose members are listed. The format is \"projects/{project_id_or_number}/groups/{group_id}\".", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/groups/[^/]+$", - "location": "path" - }, - "interval.endTime": { - "description": "Required. The end of the time interval.", - "format": "google-datetime", - "type": "string", - "location": "query" - }, - "filter": { - "description": "An optional list filter describing the members to be returned. The filter may reference the type, labels, and metadata of monitored resources that comprise the group. For example, to return only resources representing Compute Engine VM instances, use this filter:\nresource.type = \"gce_instance\"\n", - "type": "string", - "location": "query" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read" - ], - "flatPath": "v3/projects/{projectsId}/groups/{groupsId}/members" - } - } - } - } - } - } - } - }, - "parameters": { - "bearer_token": { - "location": "query", - "description": "OAuth bearer token.", - "type": "string" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "location": "query", - "description": "Returns response with indentations and line breaks.", - "type": "boolean", - "default": "true" - }, - "uploadType": { - "location": "query", - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" - }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" - }, - "$.xgafv": { - "enum": [ - "1", - "2" - ], - "description": "V1 error format.", - "type": "string", - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query" - }, - "alt": { - "enum": [ - "json", - "media", - "proto" - ], - "type": "string", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query", - "description": "Data format for response.", - "default": "json" - }, - "key": { - "location": "query", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.", - "type": "string" - }, - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "location": "query", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string" - }, - "pp": { - "description": "Pretty-print response.", - "type": "boolean", - "default": "true", - "location": "query" - } - }, - "version": "v3", - "baseUrl": "https://monitoring.googleapis.com/", - "kind": "discovery#restDescription", - "servicePath": "", - "description": "Manages your Stackdriver Monitoring data and configurations. Most projects must be associated with a Stackdriver account, with a few exceptions as noted on the individual method pages.", - "basePath": "", - "documentationLink": "https://cloud.google.com/monitoring/api/", - "id": "monitoring:v3", - "revision": "20170424", - "discoveryVersion": "v1", - "version_module": "True", - "schemas": { - "LabelDescriptor": { - "description": "A description of a label.", - "type": "object", - "properties": { - "key": { - "description": "The label key.", - "type": "string" - }, - "description": { - "description": "A human-readable description for the label.", - "type": "string" - }, - "valueType": { - "enumDescriptions": [ - "A variable-length string. This is the default.", - "Boolean; true or false.", - "A 64-bit signed integer." - ], - "enum": [ - "STRING", - "BOOL", - "INT64" - ], - "description": "The type of data that can be assigned to the label.", - "type": "string" - } - }, - "id": "LabelDescriptor" - }, - "ListTimeSeriesResponse": { - "properties": { - "nextPageToken": { - "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", - "type": "string" - }, - "timeSeries": { - "description": "One or more time series that match the filter included in the request.", - "type": "array", - "items": { - "$ref": "TimeSeries" - } - } - }, - "id": "ListTimeSeriesResponse", - "description": "The ListTimeSeries response.", - "type": "object" - }, - "Group": { - "properties": { - "filter": { - "description": "The filter used to determine which monitored resources belong to this group.", - "type": "string" - }, - "name": { - "description": "Output only. The name of this group. The format is \"projects/{project_id_or_number}/groups/{group_id}\". When creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique {group_id} that is generated automatically.", - "type": "string" - }, - "parentName": { - "description": "The name of the group's parent, if it has one. The format is \"projects/{project_id_or_number}/groups/{group_id}\". For groups with no parent, parentName is the empty string, \"\".", - "type": "string" - }, - "displayName": { - "description": "A user-assigned name for this group, used only for display purposes.", - "type": "string" - }, - "isCluster": { - "description": "If true, the members of this group are considered to be a cluster. The system can perform additional analysis on groups that are clusters.", - "type": "boolean" - } - }, - "id": "Group", - "description": "The description of a dynamic collection of monitored resources. 
Each group has a filter that is matched against monitored resources and their associated metadata. If a group's filter matches an available monitored resource, then that resource is a member of that group. Groups can contain any number of monitored resources, and each monitored resource can be a member of any number of groups.Groups can be nested in parent-child hierarchies. The parentName field identifies an optional parent for each group. If a group has a parent, then the only monitored resources available to be matched by the group's filter are the resources contained in the parent group. In other words, a group contains the monitored resources that match its filter and the filters of all the group's ancestors. A group without a parent can contain any monitored resource.For example, consider an infrastructure running a set of instances with two user-defined tags: \"environment\" and \"role\". A parent group has a filter, environment=\"production\". A child of that parent group has a filter, role=\"transcoder\". The parent group contains all instances in the production environment, regardless of their roles. The child group contains instances that have the transcoder role and are in the production environment.The monitored resources contained in a group can change at any moment, depending on what resources exist and what filters are associated with the group and its ancestors.", - "type": "object" - }, - "Type": { - "properties": { - "fields": { - "description": "The list of fields.", - "type": "array", - "items": { - "$ref": "Field" - } - }, - "name": { - "description": "The fully qualified message name.", - "type": "string" - }, - "oneofs": { - "description": "The list of types appearing in oneof definitions in this type.", - "type": "array", - "items": { - "type": "string" - } - }, - "syntax": { - "enumDescriptions": [ - "Syntax proto2.", - "Syntax proto3." - ], - "enum": [ - "SYNTAX_PROTO2", - "SYNTAX_PROTO3" - ], - "description": "The source syntax.", - "type": "string" - }, - "sourceContext": { - "description": "The source context.", - "$ref": "SourceContext" - }, - "options": { - "description": "The protocol buffer options.", - "type": "array", - "items": { - "$ref": "Option" - } - } - }, - "id": "Type", - "description": "A protocol buffer message type.", - "type": "object" - }, - "BucketOptions": { - "description": "BucketOptions describes the bucket boundaries used to create a histogram for the distribution. The buckets can be in a linear sequence, an exponential sequence, or each bucket can be specified explicitly. BucketOptions does not include the number of values in each bucket.A bucket has an inclusive lower bound and exclusive upper bound for the values that are counted for that bucket. The upper bound of a bucket must be strictly greater than the lower bound. The sequence of N buckets for a distribution consists of an underflow bucket (number 0), zero or more finite buckets (number 1 through N - 2) and an overflow bucket (number N - 1). The buckets are contiguous: the lower bound of bucket i (i \u003e 0) is the same as the upper bound of bucket i - 1. The buckets span the whole range of finite values: lower bound of the underflow bucket is -infinity and the upper bound of the overflow bucket is +infinity. The finite buckets are so-called because both bounds are finite.", - "type": "object", - "properties": { - "exponentialBuckets": { - "$ref": "Exponential", - "description": "The exponential buckets." 
- }, - "linearBuckets": { - "description": "The linear bucket.", - "$ref": "Linear" - }, - "explicitBuckets": { - "description": "The explicit buckets.", - "$ref": "Explicit" - } - }, - "id": "BucketOptions" - }, - "CollectdValue": { - "description": "A single data point from a collectd-based plugin.", - "type": "object", - "properties": { - "value": { - "$ref": "TypedValue", - "description": "The measurement value." - }, - "dataSourceType": { - "enumDescriptions": [ - "An unspecified data source type. This corresponds to google.api.MetricDescriptor.MetricKind.METRIC_KIND_UNSPECIFIED.", - "An instantaneous measurement of a varying quantity. This corresponds to google.api.MetricDescriptor.MetricKind.GAUGE.", - "A cumulative value over time. This corresponds to google.api.MetricDescriptor.MetricKind.CUMULATIVE.", - "A rate of change of the measurement.", - "An amount of change since the last measurement interval. This corresponds to google.api.MetricDescriptor.MetricKind.DELTA." - ], - "enum": [ - "UNSPECIFIED_DATA_SOURCE_TYPE", - "GAUGE", - "COUNTER", - "DERIVE", - "ABSOLUTE" - ], - "description": "The type of measurement.", - "type": "string" - }, - "dataSourceName": { - "description": "The data source for the collectd value. For example there are two data sources for network measurements: \"rx\" and \"tx\".", - "type": "string" - } - }, - "id": "CollectdValue" - }, - "MetricDescriptor": { - "description": "Defines a metric type and its schema. Once a metric descriptor is created, deleting or altering it stops data collection and makes the metric type's existing data unusable.", - "type": "object", - "properties": { - "unit": { - "description": "The unit in which the metric value is reported. It is only applicable if the value_type is INT64, DOUBLE, or DISTRIBUTION. The supported units are a subset of The Unified Code for Units of Measure (http://unitsofmeasure.org/ucum.html) standard:Basic units (UNIT)\nbit bit\nBy byte\ns second\nmin minute\nh hour\nd dayPrefixes (PREFIX)\nk kilo (10**3)\nM mega (10**6)\nG giga (10**9)\nT tera (10**12)\nP peta (10**15)\nE exa (10**18)\nZ zetta (10**21)\nY yotta (10**24)\nm milli (10**-3)\nu micro (10**-6)\nn nano (10**-9)\np pico (10**-12)\nf femto (10**-15)\na atto (10**-18)\nz zepto (10**-21)\ny yocto (10**-24)\nKi kibi (2**10)\nMi mebi (2**20)\nGi gibi (2**30)\nTi tebi (2**40)GrammarThe grammar includes the dimensionless unit 1, such as 1/s.The grammar also includes these connectors:\n/ division (as an infix operator, e.g. 1/s).\n. multiplication (as an infix operator, e.g. GBy.d)The grammar for a unit is as follows:\nExpression = Component { \".\" Component } { \"/\" Component } ;\n\nComponent = [ PREFIX ] UNIT [ Annotation ]\n | Annotation\n | \"1\"\n ;\n\nAnnotation = \"{\" NAME \"}\" ;\nNotes:\nAnnotation is just a comment if it follows a UNIT and is equivalent to 1 if it is used alone. For examples, {requests}/s == 1/s, By{transmitted}/s == By/s.\nNAME is a sequence of non-blank printable ASCII characters not containing '{' or '}'.", - "type": "string" - }, - "labels": { - "description": "The set of labels that can be used to describe a specific instance of this metric type. For example, the appengine.googleapis.com/http/server/response_latencies metric type has a label for the HTTP response code, response_code, so you can look at latencies for successful responses or just for responses that failed.", - "type": "array", - "items": { - "$ref": "LabelDescriptor" - } - }, - "name": { - "description": "The resource name of the metric descriptor. 
Depending on the implementation, the name typically includes: (1) the parent resource name that defines the scope of the metric type or of its data; and (2) the metric's URL-encoded type, which also appears in the type field of this descriptor. For example, following is the resource name of a custom metric within the GCP project my-project-id:\n\"projects/my-project-id/metricDescriptors/custom.googleapis.com%2Finvoice%2Fpaid%2Famount\"\n", - "type": "string" - }, - "type": { - "description": "The metric type, including its DNS name prefix. The type is not URL-encoded. All user-defined custom metric types have the DNS name custom.googleapis.com. Metric types should use a natural hierarchical grouping. For example:\n\"custom.googleapis.com/invoice/paid/amount\"\n\"appengine.googleapis.com/http/server/response_latencies\"\n", - "type": "string" - }, - "valueType": { - "enum": [ - "VALUE_TYPE_UNSPECIFIED", - "BOOL", - "INT64", - "DOUBLE", - "STRING", - "DISTRIBUTION", - "MONEY" - ], - "description": "Whether the measurement is an integer, a floating-point number, etc. Some combinations of metric_kind and value_type might not be supported.", - "type": "string", - "enumDescriptions": [ - "Do not use this default value.", - "The value is a boolean. This value type can be used only if the metric kind is GAUGE.", - "The value is a signed 64-bit integer.", - "The value is a double precision floating point number.", - "The value is a text string. This value type can be used only if the metric kind is GAUGE.", - "The value is a Distribution.", - "The value is money." - ] - }, - "metricKind": { - "enum": [ - "METRIC_KIND_UNSPECIFIED", - "GAUGE", - "DELTA", - "CUMULATIVE" - ], - "description": "Whether the metric records instantaneous values, changes to a value, etc. Some combinations of metric_kind and value_type might not be supported.", - "type": "string", - "enumDescriptions": [ - "Do not use this default value.", - "An instantaneous measurement of a value.", - "The change in a value during a time interval.", - "A value accumulated over a time interval. Cumulative measurements in a time series should have the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." - ] - }, - "displayName": { - "description": "A concise name for the metric, which can be displayed in user interfaces. Use sentence case without an ending period, for example \"Request count\".", - "type": "string" - }, - "description": { - "description": "A detailed description of the metric, which can be used in documentation.", - "type": "string" - } - }, - "id": "MetricDescriptor" - }, - "SourceContext": { - "description": "SourceContext represents information about the source of a protobuf element, like the file in which it is defined.", - "type": "object", - "properties": { - "fileName": { - "description": "The path-qualified name of the .proto file that contained the associated protobuf element. 
For example: \"google/protobuf/source_context.proto\".", - "type": "string" - } - }, - "id": "SourceContext" - }, - "Range": { - "properties": { - "min": { - "description": "The minimum of the population values.", - "format": "double", - "type": "number" - }, - "max": { - "description": "The maximum of the population values.", - "format": "double", - "type": "number" - } - }, - "id": "Range", - "description": "The range of the population values.", - "type": "object" - }, - "ListGroupsResponse": { - "properties": { - "nextPageToken": { - "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", - "type": "string" - }, - "group": { - "description": "The groups that match the specified filters.", - "type": "array", - "items": { - "$ref": "Group" - } - } - }, - "id": "ListGroupsResponse", - "description": "The ListGroups response.", - "type": "object" - }, - "CreateCollectdTimeSeriesRequest": { - "description": "The CreateCollectdTimeSeries request.", - "type": "object", - "properties": { - "resource": { - "$ref": "MonitoredResource", - "description": "The monitored resource associated with the time series." - }, - "collectdPayloads": { - "description": "The collectd payloads representing the time series data. You must not include more than a single point for each time series, so no two payloads can have the same values for all of the fields plugin, plugin_instance, type, and type_instance.", - "type": "array", - "items": { - "$ref": "CollectdPayload" - } - }, - "collectdVersion": { - "description": "The version of collectd that collected the data. Example: \"5.3.0-192.el6\".", - "type": "string" - } - }, - "id": "CreateCollectdTimeSeriesRequest" - }, - "ListGroupMembersResponse": { - "description": "The ListGroupMembers response.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", - "type": "string" - }, - "totalSize": { - "description": "The total number of elements matching this request.", - "format": "int32", - "type": "integer" - }, - "members": { - "description": "A set of monitored resources in the group.", - "type": "array", - "items": { - "$ref": "MonitoredResource" - } - } - }, - "id": "ListGroupMembersResponse" - }, - "ListMonitoredResourceDescriptorsResponse": { - "properties": { - "nextPageToken": { - "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", - "type": "string" - }, - "resourceDescriptors": { - "description": "The monitored resource descriptors that are available to this project and that match filter, if present.", - "type": "array", - "items": { - "$ref": "MonitoredResourceDescriptor" - } - } - }, - "id": "ListMonitoredResourceDescriptorsResponse", - "description": "The ListMonitoredResourceDescriptors response.", - "type": "object" - }, - "TimeSeries": { - "properties": { - "metricKind": { - "enumDescriptions": [ - "Do not use this default value.", - "An instantaneous measurement of a value.", - "The change in a value during a time interval.", - "A value accumulated over a time interval. 
Cumulative measurements in a time series should have the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points." - ], - "enum": [ - "METRIC_KIND_UNSPECIFIED", - "GAUGE", - "DELTA", - "CUMULATIVE" - ], - "description": "The metric kind of the time series. When listing time series, this metric kind might be different from the metric kind of the associated metric if this time series is an alignment or reduction of other time series.When creating a time series, this field is optional. If present, it must be the same as the metric kind of the associated metric. If the associated metric's descriptor must be auto-created, then this field specifies the metric kind of the new descriptor and must be either GAUGE (the default) or CUMULATIVE.", - "type": "string" - }, - "metric": { - "$ref": "Metric", - "description": "The associated metric. A fully-specified metric used to identify the time series." - }, - "points": { - "description": "The data points of this time series. When listing time series, the order of the points is specified by the list method.When creating a time series, this field must contain exactly one point and the point's type must be the same as the value type of the associated metric. If the associated metric's descriptor must be auto-created, then the value type of the descriptor is determined by the point's type, which must be BOOL, INT64, DOUBLE, or DISTRIBUTION.", - "type": "array", - "items": { - "$ref": "Point" - } - }, - "valueType": { - "enum": [ - "VALUE_TYPE_UNSPECIFIED", - "BOOL", - "INT64", - "DOUBLE", - "STRING", - "DISTRIBUTION", - "MONEY" - ], - "description": "The value type of the time series. When listing time series, this value type might be different from the value type of the associated metric if this time series is an alignment or reduction of other time series.When creating a time series, this field is optional. If present, it must be the same as the type of the data in the points field.", - "type": "string", - "enumDescriptions": [ - "Do not use this default value.", - "The value is a boolean. This value type can be used only if the metric kind is GAUGE.", - "The value is a signed 64-bit integer.", - "The value is a double precision floating point number.", - "The value is a text string. This value type can be used only if the metric kind is GAUGE.", - "The value is a Distribution.", - "The value is money." - ] - }, - "resource": { - "description": "The associated monitored resource. Custom metrics can use only certain monitored resource types in their time series data.", - "$ref": "MonitoredResource" - } - }, - "id": "TimeSeries", - "description": "A collection of data points that describes the time-varying values of a metric. A time series is identified by a combination of a fully-specified monitored resource and a fully-specified metric. This type is used for both listing and creating time series.", - "type": "object" - }, - "CreateTimeSeriesRequest": { - "description": "The CreateTimeSeries request.", - "type": "object", - "properties": { - "timeSeries": { - "description": "The new data to be added to a list of time series. Adds at most one data point to each of several time series. The new data point must be more recent than any other point in its time series. 
Each TimeSeries value must fully specify a unique time series by supplying all label values for the metric and the monitored resource.", - "type": "array", - "items": { - "$ref": "TimeSeries" - } - } - }, - "id": "CreateTimeSeriesRequest" - }, - "Distribution": { - "description": "Distribution contains summary statistics for a population of values. It optionally contains a histogram representing the distribution of those values across a set of buckets.The summary statistics are the count, mean, sum of the squared deviation from the mean, the minimum, and the maximum of the set of population of values. The histogram is based on a sequence of buckets and gives a count of values that fall into each bucket. The boundaries of the buckets are given either explicitly or by formulas for buckets of fixed or exponentially increasing widths.Although it is not forbidden, it is generally a bad idea to include non-finite values (infinities or NaNs) in the population of values, as this will render the mean and sum_of_squared_deviation fields meaningless.", - "type": "object", - "properties": { - "range": { - "description": "If specified, contains the range of the population values. The field must not be present if the count is zero. This field is presently ignored by the Stackdriver Monitoring API v3.", - "$ref": "Range" - }, - "count": { - "description": "The number of values in the population. Must be non-negative. This value must equal the sum of the values in bucket_counts if a histogram is provided.", - "format": "int64", - "type": "string" - }, - "mean": { - "description": "The arithmetic mean of the values in the population. If count is zero then this field must be zero.", - "format": "double", - "type": "number" - }, - "bucketCounts": { - "description": "Required in the Stackdriver Monitoring API v3. The values for each bucket specified in bucket_options. The sum of the values in bucketCounts must equal the value in the count field of the Distribution object. The order of the bucket counts follows the numbering schemes described for the three bucket types. The underflow bucket has number 0; the finite buckets, if any, have numbers 1 through N-2; and the overflow bucket has number N-1. The size of bucket_counts must not be greater than N. If the size is less than N, then the remaining buckets are assigned values of zero.", - "type": "array", - "items": { - "format": "int64", - "type": "string" - } - }, - "bucketOptions": { - "$ref": "BucketOptions", - "description": "Required in the Stackdriver Monitoring API v3. Defines the histogram bucket boundaries." - }, - "sumOfSquaredDeviation": { - "description": "The sum of squared deviations from the mean of the values in the population. For values x_i this is:\nSum[i=1..n]((x_i - mean)^2)\nKnuth, \"The Art of Computer Programming\", Vol. 2, page 323, 3rd edition describes Welford's method for accumulating this sum in one pass.If count is zero then this field must be zero.", - "format": "double", - "type": "number" - } - }, - "id": "Distribution" - }, - "MonitoredResource": { - "properties": { - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Required. Values for all of the labels listed in the associated monitored resource descriptor. For example, Compute Engine VM instances use the labels \"project_id\", \"instance_id\", and \"zone\".", - "type": "object" - }, - "type": { - "description": "Required. The monitored resource type. This field must match the type field of a MonitoredResourceDescriptor object. 
For example, the type of a Compute Engine VM instance is gce_instance.", - "type": "string" - } - }, - "id": "MonitoredResource", - "description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. Examples include virtual machine instances, databases, and storage devices such as disks. The type field identifies a MonitoredResourceDescriptor object that describes the resource's schema. Information in the labels field identifies the actual resource and its attributes according to the schema. For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for \"gce_instance\" has labels \"instance_id\" and \"zone\":\n{ \"type\": \"gce_instance\",\n \"labels\": { \"instance_id\": \"12345678901234\",\n \"zone\": \"us-central1-a\" }}\n", - "type": "object" - }, - "ListMetricDescriptorsResponse": { - "description": "The ListMetricDescriptors response.", - "type": "object", - "properties": { - "metricDescriptors": { - "description": "The metric descriptors that are available to the project and that match the value of filter, if present.", - "type": "array", - "items": { - "$ref": "MetricDescriptor" - } - }, - "nextPageToken": { - "description": "If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.", - "type": "string" - } - }, - "id": "ListMetricDescriptorsResponse" - }, - "MonitoredResourceDescriptor": { - "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of \"gce_instance\" and specifies the use of the labels \"instance_id\" and \"zone\" to identify particular VM instances.Different APIs can support different monitored resource types. APIs generally provide a list method that returns the monitored resource descriptors used by the API.", - "type": "object", - "properties": { - "name": { - "description": "Optional. The resource name of the monitored resource descriptor: \"projects/{project_id}/monitoredResourceDescriptors/{type}\" where {type} is the value of the type field in this object and {project_id} is a project ID that provides API-specific context for accessing the type. APIs that do not use project information can use the resource name format \"monitoredResourceDescriptors/{type}\".", - "type": "string" - }, - "displayName": { - "description": "Optional. A concise name for the monitored resource type that might be displayed in user interfaces. It should be a Title Cased Noun Phrase, without any article or other determiners. For example, \"Google Cloud SQL Database\".", - "type": "string" - }, - "description": { - "description": "Optional. A detailed description of the monitored resource type that might be used in documentation.", - "type": "string" - }, - "type": { - "description": "Required. The monitored resource type. For example, the type \"cloudsql_database\" represents databases in Google Cloud SQL. The maximum length of this value is 256 characters.", - "type": "string" - }, - "labels": { - "description": "Required. A set of labels used to describe instances of this monitored resource type. 
For example, an individual Google Cloud SQL database is identified by values for the labels \"database_id\" and \"zone\".", - "type": "array", - "items": { - "$ref": "LabelDescriptor" - } - } - }, - "id": "MonitoredResourceDescriptor" - }, - "TypedValue": { - "properties": { - "int64Value": { - "description": "A 64-bit integer. Its range is approximately ±9.2x10\u003csup\u003e18\u003c/sup\u003e.", - "format": "int64", - "type": "string" - }, - "distributionValue": { - "description": "A distribution value.", - "$ref": "Distribution" - }, - "boolValue": { - "description": "A Boolean value: true or false.", - "type": "boolean" - }, - "stringValue": { - "description": "A variable-length string value.", - "type": "string" - }, - "doubleValue": { - "description": "A 64-bit double-precision floating-point number. Its magnitude is approximately ±10\u003csup\u003e±300\u003c/sup\u003e and it has 16 significant digits of precision.", - "format": "double", - "type": "number" - } - }, - "id": "TypedValue", - "description": "A single strongly-typed value.", - "type": "object" - }, - "CollectdPayload": { - "description": "A collection of data points sent from a collectd-based plugin. See the collectd documentation for more information.", - "type": "object", - "properties": { - "plugin": { - "description": "The name of the plugin. Example: \"disk\".", - "type": "string" - }, - "pluginInstance": { - "description": "The instance name of the plugin Example: \"hdcl\".", - "type": "string" - }, - "endTime": { - "description": "The end time of the interval.", - "format": "google-datetime", - "type": "string" - }, - "startTime": { - "description": "The start time of the interval.", - "format": "google-datetime", - "type": "string" - }, - "values": { - "description": "The measured values during this time interval. Each value must have a different dataSourceName.", - "type": "array", - "items": { - "$ref": "CollectdValue" - } - }, - "typeInstance": { - "description": "The measurement type instance. Example: \"used\".", - "type": "string" - }, - "type": { - "description": "The measurement type. Example: \"memory\".", - "type": "string" - }, - "metadata": { - "additionalProperties": { - "$ref": "TypedValue" - }, - "description": "The measurement metadata. Example: \"process_id\" -\u003e 12345", - "type": "object" - } - }, - "id": "CollectdPayload" - }, - "Linear": { - "description": "Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): offset + (width * i). Lower bound (1 \u003c= i \u003c N): offset + (width * (i - 1)).", - "type": "object", - "properties": { - "width": { - "description": "Must be greater than 0.", - "format": "double", - "type": "number" - }, - "offset": { - "description": "Lower bound of the first bucket.", - "format": "double", - "type": "number" - }, - "numFiniteBuckets": { - "description": "Must be greater than 0.", - "format": "int32", - "type": "integer" - } - }, - "id": "Linear" - }, - "Option": { - "properties": { - "name": { - "description": "The option's name. For protobuf built-in options (options defined in descriptor.proto), this is the short name. For example, \"map_entry\". For custom options, it should be the fully-qualified name. 
For example, \"google.api.http\".", - "type": "string" - }, - "value": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "The option's value packed in an Any message. If the value is a primitive, the corresponding wrapper type defined in google/protobuf/wrappers.proto should be used. If the value is an enum, it should be stored as an int32 value using the google.protobuf.Int32Value type.", - "type": "object" - } - }, - "id": "Option", - "description": "A protocol buffer option, which can be attached to a message, field, enumeration, etc.", - "type": "object" - }, - "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance:\nservice Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n}\nThe JSON representation for Empty is empty JSON object {}.", - "type": "object", - "properties": {}, - "id": "Empty" - }, - "TimeInterval": { - "description": "A time interval extending just after a start time through an end time. If the start time is the same as the end time, then the interval represents a single point in time.", - "type": "object", - "properties": { - "endTime": { - "description": "Required. The end of the time interval.", - "format": "google-datetime", - "type": "string" - }, - "startTime": { - "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", - "format": "google-datetime", - "type": "string" - } - }, - "id": "TimeInterval" - }, - "Explicit": { - "description": "Specifies a set of buckets with arbitrary widths.There are size(bounds) + 1 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): boundsi Lower bound (1 \u003c= i \u003c N); boundsi - 1The bounds field must contain at least one element. If bounds has only one element, then there are no finite buckets, and that single element is the common boundary of the overflow and underflow buckets.", - "type": "object", - "properties": { - "bounds": { - "description": "The values must be monotonically increasing.", - "type": "array", - "items": { - "format": "double", - "type": "number" - } - } - }, - "id": "Explicit" - }, - "Exponential": { - "properties": { - "growthFactor": { - "description": "Must be greater than 1.", - "format": "double", - "type": "number" - }, - "scale": { - "description": "Must be greater than 0.", - "format": "double", - "type": "number" - }, - "numFiniteBuckets": { - "description": "Must be greater than 0.", - "format": "int32", - "type": "integer" - } - }, - "id": "Exponential", - "description": "Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): scale * (growth_factor ^ i). Lower bound (1 \u003c= i \u003c N): scale * (growth_factor ^ (i - 1)).", - "type": "object" - }, - "Point": { - "properties": { - "value": { - "$ref": "TypedValue", - "description": "The value of the data point." - }, - "interval": { - "description": "The time interval to which the data point applies. 
For GAUGE metrics, only the end time of the interval is used. For DELTA metrics, the start and end time should specify a non-zero interval, with subsequent points specifying contiguous and non-overlapping intervals. For CUMULATIVE metrics, the start and end time should specify a non-zero interval, with subsequent points specifying the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points.", - "$ref": "TimeInterval" - } - }, - "id": "Point", - "description": "A single data point in a time series.", - "type": "object" - }, - "Field": { - "properties": { - "jsonName": { - "description": "The field JSON name.", - "type": "string" - }, - "kind": { - "enum": [ - "TYPE_UNKNOWN", - "TYPE_DOUBLE", - "TYPE_FLOAT", - "TYPE_INT64", - "TYPE_UINT64", - "TYPE_INT32", - "TYPE_FIXED64", - "TYPE_FIXED32", - "TYPE_BOOL", - "TYPE_STRING", - "TYPE_GROUP", - "TYPE_MESSAGE", - "TYPE_BYTES", - "TYPE_UINT32", - "TYPE_ENUM", - "TYPE_SFIXED32", - "TYPE_SFIXED64", - "TYPE_SINT32", - "TYPE_SINT64" - ], - "description": "The field type.", - "type": "string", - "enumDescriptions": [ - "Field type unknown.", - "Field type double.", - "Field type float.", - "Field type int64.", - "Field type uint64.", - "Field type int32.", - "Field type fixed64.", - "Field type fixed32.", - "Field type bool.", - "Field type string.", - "Field type group. Proto2 syntax only, and deprecated.", - "Field type message.", - "Field type bytes.", - "Field type uint32.", - "Field type enum.", - "Field type sfixed32.", - "Field type sfixed64.", - "Field type sint32.", - "Field type sint64." - ] - }, - "options": { - "description": "The protocol buffer options.", - "type": "array", - "items": { - "$ref": "Option" - } - }, - "oneofIndex": { - "description": "The index of the field type in Type.oneofs, for message or enumeration types. The first type has index 1; zero means the type is not in the list.", - "format": "int32", - "type": "integer" - }, - "cardinality": { - "enumDescriptions": [ - "For fields with unknown cardinality.", - "For optional fields.", - "For required fields. Proto2 syntax only.", - "For repeated fields." - ], - "enum": [ - "CARDINALITY_UNKNOWN", - "CARDINALITY_OPTIONAL", - "CARDINALITY_REQUIRED", - "CARDINALITY_REPEATED" - ], - "description": "The field cardinality.", - "type": "string" - }, - "packed": { - "description": "Whether to use alternative packed wire representation.", - "type": "boolean" - }, - "defaultValue": { - "description": "The string value of the default value of this field. Proto2 syntax only.", - "type": "string" - }, - "name": { - "description": "The field name.", - "type": "string" - }, - "typeUrl": { - "description": "The field type URL, without the scheme, for message or enumeration types. Example: \"type.googleapis.com/google.protobuf.Timestamp\".", - "type": "string" - }, - "number": { - "description": "The field number.", - "format": "int32", - "type": "integer" - } - }, - "id": "Field", - "description": "A single field of a message type.", - "type": "object" - }, - "Metric": { - "description": "A specific metric, identified by specifying values for all of the labels of a MetricDescriptor.", - "type": "object", - "properties": { - "type": { - "description": "An existing metric type, see google.api.MetricDescriptor. 
For example, custom.googleapis.com/invoice/paid/amount.", - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "The set of label values that uniquely identify this metric. All labels listed in the MetricDescriptor must be assigned values.", - "type": "object" - } - }, - "id": "Metric" - } - }, - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "protocol": "rest", - "canonicalName": "Monitoring", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/monitoring": { - "description": "View and write monitoring data for all of your Google and third-party Cloud and API projects" - }, - "https://www.googleapis.com/auth/monitoring.write": { - "description": "Publish metric data to your Google Cloud projects" - }, - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/monitoring.read": { - "description": "View monitoring data for all of your Google Cloud and third-party projects" - } - } - } - }, - "rootUrl": "https://monitoring.googleapis.com/", - "ownerDomain": "google.com", - "name": "monitoring", - "batchPath": "batch", - "title": "Stackdriver Monitoring API", - "ownerName": "Google" -} diff --git a/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go b/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go deleted file mode 100644 index b8268dfbf82..00000000000 --- a/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go +++ /dev/null @@ -1,4573 +0,0 @@ -// Package monitoring provides access to the Stackdriver Monitoring API. -// -// See https://cloud.google.com/monitoring/api/ -// -// Usage example: -// -// import "google.golang.org/api/monitoring/v3" -// ... -// monitoringService, err := monitoring.New(oauthHttpClient) -package monitoring // import "google.golang.org/api/monitoring/v3" - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - context "golang.org/x/net/context" - ctxhttp "golang.org/x/net/context/ctxhttp" - gensupport "google.golang.org/api/gensupport" - googleapi "google.golang.org/api/googleapi" - "io" - "net/http" - "net/url" - "strconv" - "strings" -) - -// Always reference these packages, just in case the auto-generated code -// below doesn't. -var _ = bytes.NewBuffer -var _ = strconv.Itoa -var _ = fmt.Sprintf -var _ = json.NewDecoder -var _ = io.Copy -var _ = url.Parse -var _ = gensupport.MarshalJSON -var _ = googleapi.Version -var _ = errors.New -var _ = strings.Replace -var _ = context.Canceled -var _ = ctxhttp.Do - -const apiId = "monitoring:v3" -const apiName = "monitoring" -const apiVersion = "v3" -const basePath = "https://monitoring.googleapis.com/" - -// OAuth2 scopes used by this API. 
-const ( - // View and manage your data across Google Cloud Platform services - CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" - - // View and write monitoring data for all of your Google and third-party - // Cloud and API projects - MonitoringScope = "https://www.googleapis.com/auth/monitoring" - - // View monitoring data for all of your Google Cloud and third-party - // projects - MonitoringReadScope = "https://www.googleapis.com/auth/monitoring.read" - - // Publish metric data to your Google Cloud projects - MonitoringWriteScope = "https://www.googleapis.com/auth/monitoring.write" -) - -func New(client *http.Client) (*Service, error) { - if client == nil { - return nil, errors.New("client is nil") - } - s := &Service{client: client, BasePath: basePath} - s.Projects = NewProjectsService(s) - return s, nil -} - -type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment - - Projects *ProjectsService -} - -func (s *Service) userAgent() string { - if s.UserAgent == "" { - return googleapi.UserAgent - } - return googleapi.UserAgent + " " + s.UserAgent -} - -func NewProjectsService(s *Service) *ProjectsService { - rs := &ProjectsService{s: s} - rs.CollectdTimeSeries = NewProjectsCollectdTimeSeriesService(s) - rs.Groups = NewProjectsGroupsService(s) - rs.MetricDescriptors = NewProjectsMetricDescriptorsService(s) - rs.MonitoredResourceDescriptors = NewProjectsMonitoredResourceDescriptorsService(s) - rs.TimeSeries = NewProjectsTimeSeriesService(s) - return rs -} - -type ProjectsService struct { - s *Service - - CollectdTimeSeries *ProjectsCollectdTimeSeriesService - - Groups *ProjectsGroupsService - - MetricDescriptors *ProjectsMetricDescriptorsService - - MonitoredResourceDescriptors *ProjectsMonitoredResourceDescriptorsService - - TimeSeries *ProjectsTimeSeriesService -} - -func NewProjectsCollectdTimeSeriesService(s *Service) *ProjectsCollectdTimeSeriesService { - rs := &ProjectsCollectdTimeSeriesService{s: s} - return rs -} - -type ProjectsCollectdTimeSeriesService struct { - s *Service -} - -func NewProjectsGroupsService(s *Service) *ProjectsGroupsService { - rs := &ProjectsGroupsService{s: s} - rs.Members = NewProjectsGroupsMembersService(s) - return rs -} - -type ProjectsGroupsService struct { - s *Service - - Members *ProjectsGroupsMembersService -} - -func NewProjectsGroupsMembersService(s *Service) *ProjectsGroupsMembersService { - rs := &ProjectsGroupsMembersService{s: s} - return rs -} - -type ProjectsGroupsMembersService struct { - s *Service -} - -func NewProjectsMetricDescriptorsService(s *Service) *ProjectsMetricDescriptorsService { - rs := &ProjectsMetricDescriptorsService{s: s} - return rs -} - -type ProjectsMetricDescriptorsService struct { - s *Service -} - -func NewProjectsMonitoredResourceDescriptorsService(s *Service) *ProjectsMonitoredResourceDescriptorsService { - rs := &ProjectsMonitoredResourceDescriptorsService{s: s} - return rs -} - -type ProjectsMonitoredResourceDescriptorsService struct { - s *Service -} - -func NewProjectsTimeSeriesService(s *Service) *ProjectsTimeSeriesService { - rs := &ProjectsTimeSeriesService{s: s} - return rs -} - -type ProjectsTimeSeriesService struct { - s *Service -} - -// BucketOptions: BucketOptions describes the bucket boundaries used to -// create a histogram for the distribution. The buckets can be in a -// linear sequence, an exponential sequence, or each bucket can be -// specified explicitly. 
BucketOptions does not include the number of -// values in each bucket.A bucket has an inclusive lower bound and -// exclusive upper bound for the values that are counted for that -// bucket. The upper bound of a bucket must be strictly greater than the -// lower bound. The sequence of N buckets for a distribution consists of -// an underflow bucket (number 0), zero or more finite buckets (number 1 -// through N - 2) and an overflow bucket (number N - 1). The buckets are -// contiguous: the lower bound of bucket i (i > 0) is the same as the -// upper bound of bucket i - 1. The buckets span the whole range of -// finite values: lower bound of the underflow bucket is -infinity and -// the upper bound of the overflow bucket is +infinity. The finite -// buckets are so-called because both bounds are finite. -type BucketOptions struct { - // ExplicitBuckets: The explicit buckets. - ExplicitBuckets *Explicit `json:"explicitBuckets,omitempty"` - - // ExponentialBuckets: The exponential buckets. - ExponentialBuckets *Exponential `json:"exponentialBuckets,omitempty"` - - // LinearBuckets: The linear bucket. - LinearBuckets *Linear `json:"linearBuckets,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ExplicitBuckets") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ExplicitBuckets") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *BucketOptions) MarshalJSON() ([]byte, error) { - type noMethod BucketOptions - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// CollectdPayload: A collection of data points sent from a -// collectd-based plugin. See the collectd documentation for more -// information. -type CollectdPayload struct { - // EndTime: The end time of the interval. - EndTime string `json:"endTime,omitempty"` - - // Metadata: The measurement metadata. Example: "process_id" -> 12345 - Metadata map[string]TypedValue `json:"metadata,omitempty"` - - // Plugin: The name of the plugin. Example: "disk". - Plugin string `json:"plugin,omitempty"` - - // PluginInstance: The instance name of the plugin Example: "hdcl". - PluginInstance string `json:"pluginInstance,omitempty"` - - // StartTime: The start time of the interval. - StartTime string `json:"startTime,omitempty"` - - // Type: The measurement type. Example: "memory". - Type string `json:"type,omitempty"` - - // TypeInstance: The measurement type instance. Example: "used". - TypeInstance string `json:"typeInstance,omitempty"` - - // Values: The measured values during this time interval. Each value - // must have a different dataSourceName. - Values []*CollectdValue `json:"values,omitempty"` - - // ForceSendFields is a list of field names (e.g. "EndTime") to - // unconditionally include in API requests. 
By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "EndTime") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *CollectdPayload) MarshalJSON() ([]byte, error) { - type noMethod CollectdPayload - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// CollectdValue: A single data point from a collectd-based plugin. -type CollectdValue struct { - // DataSourceName: The data source for the collectd value. For example - // there are two data sources for network measurements: "rx" and "tx". - DataSourceName string `json:"dataSourceName,omitempty"` - - // DataSourceType: The type of measurement. - // - // Possible values: - // "UNSPECIFIED_DATA_SOURCE_TYPE" - An unspecified data source type. - // This corresponds to - // google.api.MetricDescriptor.MetricKind.METRIC_KIND_UNSPECIFIED. - // "GAUGE" - An instantaneous measurement of a varying quantity. This - // corresponds to google.api.MetricDescriptor.MetricKind.GAUGE. - // "COUNTER" - A cumulative value over time. This corresponds to - // google.api.MetricDescriptor.MetricKind.CUMULATIVE. - // "DERIVE" - A rate of change of the measurement. - // "ABSOLUTE" - An amount of change since the last measurement - // interval. This corresponds to - // google.api.MetricDescriptor.MetricKind.DELTA. - DataSourceType string `json:"dataSourceType,omitempty"` - - // Value: The measurement value. - Value *TypedValue `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "DataSourceName") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DataSourceName") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *CollectdValue) MarshalJSON() ([]byte, error) { - type noMethod CollectdValue - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// CreateCollectdTimeSeriesRequest: The CreateCollectdTimeSeries -// request. -type CreateCollectdTimeSeriesRequest struct { - // CollectdPayloads: The collectd payloads representing the time series - // data. 
You must not include more than a single point for each time - // series, so no two payloads can have the same values for all of the - // fields plugin, plugin_instance, type, and type_instance. - CollectdPayloads []*CollectdPayload `json:"collectdPayloads,omitempty"` - - // CollectdVersion: The version of collectd that collected the data. - // Example: "5.3.0-192.el6". - CollectdVersion string `json:"collectdVersion,omitempty"` - - // Resource: The monitored resource associated with the time series. - Resource *MonitoredResource `json:"resource,omitempty"` - - // ForceSendFields is a list of field names (e.g. "CollectdPayloads") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CollectdPayloads") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *CreateCollectdTimeSeriesRequest) MarshalJSON() ([]byte, error) { - type noMethod CreateCollectdTimeSeriesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// CreateTimeSeriesRequest: The CreateTimeSeries request. -type CreateTimeSeriesRequest struct { - // TimeSeries: The new data to be added to a list of time series. Adds - // at most one data point to each of several time series. The new data - // point must be more recent than any other point in its time series. - // Each TimeSeries value must fully specify a unique time series by - // supplying all label values for the metric and the monitored resource. - TimeSeries []*TimeSeries `json:"timeSeries,omitempty"` - - // ForceSendFields is a list of field names (e.g. "TimeSeries") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "TimeSeries") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *CreateTimeSeriesRequest) MarshalJSON() ([]byte, error) { - type noMethod CreateTimeSeriesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Distribution: Distribution contains summary statistics for a -// population of values. 
It optionally contains a histogram representing -// the distribution of those values across a set of buckets.The summary -// statistics are the count, mean, sum of the squared deviation from the -// mean, the minimum, and the maximum of the set of population of -// values. The histogram is based on a sequence of buckets and gives a -// count of values that fall into each bucket. The boundaries of the -// buckets are given either explicitly or by formulas for buckets of -// fixed or exponentially increasing widths.Although it is not -// forbidden, it is generally a bad idea to include non-finite values -// (infinities or NaNs) in the population of values, as this will render -// the mean and sum_of_squared_deviation fields meaningless. -type Distribution struct { - // BucketCounts: Required in the Stackdriver Monitoring API v3. The - // values for each bucket specified in bucket_options. The sum of the - // values in bucketCounts must equal the value in the count field of the - // Distribution object. The order of the bucket counts follows the - // numbering schemes described for the three bucket types. The underflow - // bucket has number 0; the finite buckets, if any, have numbers 1 - // through N-2; and the overflow bucket has number N-1. The size of - // bucket_counts must not be greater than N. If the size is less than N, - // then the remaining buckets are assigned values of zero. - BucketCounts googleapi.Int64s `json:"bucketCounts,omitempty"` - - // BucketOptions: Required in the Stackdriver Monitoring API v3. Defines - // the histogram bucket boundaries. - BucketOptions *BucketOptions `json:"bucketOptions,omitempty"` - - // Count: The number of values in the population. Must be non-negative. - // This value must equal the sum of the values in bucket_counts if a - // histogram is provided. - Count int64 `json:"count,omitempty,string"` - - // Mean: The arithmetic mean of the values in the population. If count - // is zero then this field must be zero. - Mean float64 `json:"mean,omitempty"` - - // Range: If specified, contains the range of the population values. The - // field must not be present if the count is zero. This field is - // presently ignored by the Stackdriver Monitoring API v3. - Range *Range `json:"range,omitempty"` - - // SumOfSquaredDeviation: The sum of squared deviations from the mean of - // the values in the population. For values x_i this - // is: - // Sum[i=1..n]((x_i - mean)^2) - // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd - // edition describes Welford's method for accumulating this sum in one - // pass.If count is zero then this field must be zero. - SumOfSquaredDeviation float64 `json:"sumOfSquaredDeviation,omitempty"` - - // ForceSendFields is a list of field names (e.g. "BucketCounts") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "BucketCounts") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. 
- // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Distribution) MarshalJSON() ([]byte, error) { - type noMethod Distribution - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *Distribution) UnmarshalJSON(data []byte) error { - type noMethod Distribution - var s1 struct { - Mean gensupport.JSONFloat64 `json:"mean"` - SumOfSquaredDeviation gensupport.JSONFloat64 `json:"sumOfSquaredDeviation"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.Mean = float64(s1.Mean) - s.SumOfSquaredDeviation = float64(s1.SumOfSquaredDeviation) - return nil -} - -// Empty: A generic empty message that you can re-use to avoid defining -// duplicated empty messages in your APIs. A typical example is to use -// it as the request or the response type of an API method. For -// instance: -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// The JSON representation for Empty is empty JSON object {}. -type Empty struct { - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` -} - -// Explicit: Specifies a set of buckets with arbitrary widths.There are -// size(bounds) + 1 (= N) buckets. Bucket i has the following -// boundaries:Upper bound (0 <= i < N-1): boundsi Lower bound (1 <= i < -// N); boundsi - 1The bounds field must contain at least one element. If -// bounds has only one element, then there are no finite buckets, and -// that single element is the common boundary of the overflow and -// underflow buckets. -type Explicit struct { - // Bounds: The values must be monotonically increasing. - Bounds []float64 `json:"bounds,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Bounds") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Bounds") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Explicit) MarshalJSON() ([]byte, error) { - type noMethod Explicit - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Exponential: Specifies an exponential sequence of buckets that have a -// width that is proportional to the value of the lower bound. Each -// bucket represents a constant relative uncertainty on a specific value -// in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket -// i has the following boundaries:Upper bound (0 <= i < N-1): scale * -// (growth_factor ^ i). Lower bound (1 <= i < N): scale * -// (growth_factor ^ (i - 1)). -type Exponential struct { - // GrowthFactor: Must be greater than 1. - GrowthFactor float64 `json:"growthFactor,omitempty"` - - // NumFiniteBuckets: Must be greater than 0. 
- NumFiniteBuckets int64 `json:"numFiniteBuckets,omitempty"` - - // Scale: Must be greater than 0. - Scale float64 `json:"scale,omitempty"` - - // ForceSendFields is a list of field names (e.g. "GrowthFactor") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "GrowthFactor") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Exponential) MarshalJSON() ([]byte, error) { - type noMethod Exponential - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *Exponential) UnmarshalJSON(data []byte) error { - type noMethod Exponential - var s1 struct { - GrowthFactor gensupport.JSONFloat64 `json:"growthFactor"` - Scale gensupport.JSONFloat64 `json:"scale"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.GrowthFactor = float64(s1.GrowthFactor) - s.Scale = float64(s1.Scale) - return nil -} - -// Field: A single field of a message type. -type Field struct { - // Cardinality: The field cardinality. - // - // Possible values: - // "CARDINALITY_UNKNOWN" - For fields with unknown cardinality. - // "CARDINALITY_OPTIONAL" - For optional fields. - // "CARDINALITY_REQUIRED" - For required fields. Proto2 syntax only. - // "CARDINALITY_REPEATED" - For repeated fields. - Cardinality string `json:"cardinality,omitempty"` - - // DefaultValue: The string value of the default value of this field. - // Proto2 syntax only. - DefaultValue string `json:"defaultValue,omitempty"` - - // JsonName: The field JSON name. - JsonName string `json:"jsonName,omitempty"` - - // Kind: The field type. - // - // Possible values: - // "TYPE_UNKNOWN" - Field type unknown. - // "TYPE_DOUBLE" - Field type double. - // "TYPE_FLOAT" - Field type float. - // "TYPE_INT64" - Field type int64. - // "TYPE_UINT64" - Field type uint64. - // "TYPE_INT32" - Field type int32. - // "TYPE_FIXED64" - Field type fixed64. - // "TYPE_FIXED32" - Field type fixed32. - // "TYPE_BOOL" - Field type bool. - // "TYPE_STRING" - Field type string. - // "TYPE_GROUP" - Field type group. Proto2 syntax only, and - // deprecated. - // "TYPE_MESSAGE" - Field type message. - // "TYPE_BYTES" - Field type bytes. - // "TYPE_UINT32" - Field type uint32. - // "TYPE_ENUM" - Field type enum. - // "TYPE_SFIXED32" - Field type sfixed32. - // "TYPE_SFIXED64" - Field type sfixed64. - // "TYPE_SINT32" - Field type sint32. - // "TYPE_SINT64" - Field type sint64. - Kind string `json:"kind,omitempty"` - - // Name: The field name. - Name string `json:"name,omitempty"` - - // Number: The field number. - Number int64 `json:"number,omitempty"` - - // OneofIndex: The index of the field type in Type.oneofs, for message - // or enumeration types. The first type has index 1; zero means the type - // is not in the list. 
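An illustrative sketch (not part of the vendored file, using a hypothetical package and function name): the Exponential bucketer documented above defines num_finite_buckets + 2 buckets, where the upper bound of bucket i (for 0 <= i < N-1) is scale * growth_factor^i and the final bucket is the unbounded overflow bucket.

package bucketsketch

import "math"

// exponentialUpperBounds returns the upper bound of every bucket except the
// overflow bucket (bucket numFiniteBuckets+1), which has no upper bound.
// For example, exponentialUpperBounds(4, 2, 1) yields [1 2 4 8 16]; bucket 0
// (the underflow bucket) covers all values below 1.
func exponentialUpperBounds(numFiniteBuckets int, growthFactor, scale float64) []float64 {
	bounds := make([]float64, 0, numFiniteBuckets+1)
	for i := 0; i <= numFiniteBuckets; i++ {
		bounds = append(bounds, scale*math.Pow(growthFactor, float64(i)))
	}
	return bounds
}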
- OneofIndex int64 `json:"oneofIndex,omitempty"` - - // Options: The protocol buffer options. - Options []*Option `json:"options,omitempty"` - - // Packed: Whether to use alternative packed wire representation. - Packed bool `json:"packed,omitempty"` - - // TypeUrl: The field type URL, without the scheme, for message or - // enumeration types. Example: - // "type.googleapis.com/google.protobuf.Timestamp". - TypeUrl string `json:"typeUrl,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Cardinality") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Cardinality") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Field) MarshalJSON() ([]byte, error) { - type noMethod Field - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Group: The description of a dynamic collection of monitored -// resources. Each group has a filter that is matched against monitored -// resources and their associated metadata. If a group's filter matches -// an available monitored resource, then that resource is a member of -// that group. Groups can contain any number of monitored resources, and -// each monitored resource can be a member of any number of -// groups.Groups can be nested in parent-child hierarchies. The -// parentName field identifies an optional parent for each group. If a -// group has a parent, then the only monitored resources available to be -// matched by the group's filter are the resources contained in the -// parent group. In other words, a group contains the monitored -// resources that match its filter and the filters of all the group's -// ancestors. A group without a parent can contain any monitored -// resource.For example, consider an infrastructure running a set of -// instances with two user-defined tags: "environment" and "role". A -// parent group has a filter, environment="production". A child of that -// parent group has a filter, role="transcoder". The parent group -// contains all instances in the production environment, regardless of -// their roles. The child group contains instances that have the -// transcoder role and are in the production environment.The monitored -// resources contained in a group can change at any moment, depending on -// what resources exist and what filters are associated with the group -// and its ancestors. -type Group struct { - // DisplayName: A user-assigned name for this group, used only for - // display purposes. - DisplayName string `json:"displayName,omitempty"` - - // Filter: The filter used to determine which monitored resources belong - // to this group. - Filter string `json:"filter,omitempty"` - - // IsCluster: If true, the members of this group are considered to be a - // cluster. 
The system can perform additional analysis on groups that - // are clusters. - IsCluster bool `json:"isCluster,omitempty"` - - // Name: Output only. The name of this group. The format is - // "projects/{project_id_or_number}/groups/{group_id}". When creating a - // group, this field is ignored and a new name is created consisting of - // the project specified in the call to CreateGroup and a unique - // {group_id} that is generated automatically. - Name string `json:"name,omitempty"` - - // ParentName: The name of the group's parent, if it has one. The format - // is "projects/{project_id_or_number}/groups/{group_id}". For groups - // with no parent, parentName is the empty string, "". - ParentName string `json:"parentName,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "DisplayName") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DisplayName") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Group) MarshalJSON() ([]byte, error) { - type noMethod Group - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// LabelDescriptor: A description of a label. -type LabelDescriptor struct { - // Description: A human-readable description for the label. - Description string `json:"description,omitempty"` - - // Key: The label key. - Key string `json:"key,omitempty"` - - // ValueType: The type of data that can be assigned to the label. - // - // Possible values: - // "STRING" - A variable-length string. This is the default. - // "BOOL" - Boolean; true or false. - // "INT64" - A 64-bit signed integer. - ValueType string `json:"valueType,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *LabelDescriptor) MarshalJSON() ([]byte, error) { - type noMethod LabelDescriptor - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Linear: Specifies a linear sequence of buckets that all have the same -// width (except overflow and underflow). Each bucket represents a -// constant absolute uncertainty on the specific value in the -// bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has -// the following boundaries:Upper bound (0 <= i < N-1): offset + (width -// * i). Lower bound (1 <= i < N): offset + (width * (i - 1)). -type Linear struct { - // NumFiniteBuckets: Must be greater than 0. - NumFiniteBuckets int64 `json:"numFiniteBuckets,omitempty"` - - // Offset: Lower bound of the first bucket. - Offset float64 `json:"offset,omitempty"` - - // Width: Must be greater than 0. - Width float64 `json:"width,omitempty"` - - // ForceSendFields is a list of field names (e.g. "NumFiniteBuckets") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "NumFiniteBuckets") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Linear) MarshalJSON() ([]byte, error) { - type noMethod Linear - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *Linear) UnmarshalJSON(data []byte) error { - type noMethod Linear - var s1 struct { - Offset gensupport.JSONFloat64 `json:"offset"` - Width gensupport.JSONFloat64 `json:"width"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.Offset = float64(s1.Offset) - s.Width = float64(s1.Width) - return nil -} - -// ListGroupMembersResponse: The ListGroupMembers response. -type ListGroupMembersResponse struct { - // Members: A set of monitored resources in the group. - Members []*MonitoredResource `json:"members,omitempty"` - - // NextPageToken: If there are more results than have been returned, - // then this field is set to a non-empty value. To see the additional - // results, use that value as pageToken in the next call to this method. - NextPageToken string `json:"nextPageToken,omitempty"` - - // TotalSize: The total number of elements matching this request. - TotalSize int64 `json:"totalSize,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Members") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
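A matching sketch for the Linear bucketer above (again illustrative only, not part of the vendored file): bucket upper bounds are offset + width * i, with the same underflow/overflow numbering as the other bucketers.

package bucketsketch

// linearUpperBounds returns the upper bound of every bucket except the
// overflow bucket. For example, linearUpperBounds(3, 5, 10) yields
// [5 15 25 35]: bucket 0 (underflow) covers values below 5 and bucket 4
// (overflow) covers values of 35 and above.
func linearUpperBounds(numFiniteBuckets int, offset, width float64) []float64 {
	bounds := make([]float64, 0, numFiniteBuckets+1)
	for i := 0; i <= numFiniteBuckets; i++ {
		bounds = append(bounds, offset+width*float64(i))
	}
	return bounds
}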
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Members") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListGroupMembersResponse) MarshalJSON() ([]byte, error) { - type noMethod ListGroupMembersResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListGroupsResponse: The ListGroups response. -type ListGroupsResponse struct { - // Group: The groups that match the specified filters. - Group []*Group `json:"group,omitempty"` - - // NextPageToken: If there are more results than have been returned, - // then this field is set to a non-empty value. To see the additional - // results, use that value as pageToken in the next call to this method. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Group") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Group") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListGroupsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListGroupsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListMetricDescriptorsResponse: The ListMetricDescriptors response. -type ListMetricDescriptorsResponse struct { - // MetricDescriptors: The metric descriptors that are available to the - // project and that match the value of filter, if present. - MetricDescriptors []*MetricDescriptor `json:"metricDescriptors,omitempty"` - - // NextPageToken: If there are more results than have been returned, - // then this field is set to a non-empty value. To see the additional - // results, use that value as pageToken in the next call to this method. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "MetricDescriptors") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "MetricDescriptors") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ListMetricDescriptorsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListMetricDescriptorsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListMonitoredResourceDescriptorsResponse: The -// ListMonitoredResourceDescriptors response. -type ListMonitoredResourceDescriptorsResponse struct { - // NextPageToken: If there are more results than have been returned, - // then this field is set to a non-empty value. To see the additional - // results, use that value as pageToken in the next call to this method. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ResourceDescriptors: The monitored resource descriptors that are - // available to this project and that match filter, if present. - ResourceDescriptors []*MonitoredResourceDescriptor `json:"resourceDescriptors,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "NextPageToken") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "NextPageToken") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListMonitoredResourceDescriptorsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListMonitoredResourceDescriptorsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListTimeSeriesResponse: The ListTimeSeries response. -type ListTimeSeriesResponse struct { - // NextPageToken: If there are more results than have been returned, - // then this field is set to a non-empty value. To see the additional - // results, use that value as pageToken in the next call to this method. - NextPageToken string `json:"nextPageToken,omitempty"` - - // TimeSeries: One or more time series that match the filter included in - // the request. - TimeSeries []*TimeSeries `json:"timeSeries,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "NextPageToken") to - // unconditionally include in API requests. 
By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "NextPageToken") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListTimeSeriesResponse) MarshalJSON() ([]byte, error) { - type noMethod ListTimeSeriesResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Metric: A specific metric, identified by specifying values for all of -// the labels of a MetricDescriptor. -type Metric struct { - // Labels: The set of label values that uniquely identify this metric. - // All labels listed in the MetricDescriptor must be assigned values. - Labels map[string]string `json:"labels,omitempty"` - - // Type: An existing metric type, see google.api.MetricDescriptor. For - // example, custom.googleapis.com/invoice/paid/amount. - Type string `json:"type,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Labels") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Labels") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Metric) MarshalJSON() ([]byte, error) { - type noMethod Metric - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// MetricDescriptor: Defines a metric type and its schema. Once a metric -// descriptor is created, deleting or altering it stops data collection -// and makes the metric type's existing data unusable. -type MetricDescriptor struct { - // Description: A detailed description of the metric, which can be used - // in documentation. - Description string `json:"description,omitempty"` - - // DisplayName: A concise name for the metric, which can be displayed in - // user interfaces. Use sentence case without an ending period, for - // example "Request count". - DisplayName string `json:"displayName,omitempty"` - - // Labels: The set of labels that can be used to describe a specific - // instance of this metric type. 
For example, the - // appengine.googleapis.com/http/server/response_latencies metric type - // has a label for the HTTP response code, response_code, so you can - // look at latencies for successful responses or just for responses that - // failed. - Labels []*LabelDescriptor `json:"labels,omitempty"` - - // MetricKind: Whether the metric records instantaneous values, changes - // to a value, etc. Some combinations of metric_kind and value_type - // might not be supported. - // - // Possible values: - // "METRIC_KIND_UNSPECIFIED" - Do not use this default value. - // "GAUGE" - An instantaneous measurement of a value. - // "DELTA" - The change in a value during a time interval. - // "CUMULATIVE" - A value accumulated over a time interval. Cumulative - // measurements in a time series should have the same start time and - // increasing end times, until an event resets the cumulative value to - // zero and sets a new start time for the following points. - MetricKind string `json:"metricKind,omitempty"` - - // Name: The resource name of the metric descriptor. Depending on the - // implementation, the name typically includes: (1) the parent resource - // name that defines the scope of the metric type or of its data; and - // (2) the metric's URL-encoded type, which also appears in the type - // field of this descriptor. For example, following is the resource name - // of a custom metric within the GCP project - // my-project-id: - // "projects/my-project-id/metricDescriptors/custom.google - // apis.com%2Finvoice%2Fpaid%2Famount" - // - Name string `json:"name,omitempty"` - - // Type: The metric type, including its DNS name prefix. The type is not - // URL-encoded. All user-defined custom metric types have the DNS name - // custom.googleapis.com. Metric types should use a natural hierarchical - // grouping. For - // example: - // "custom.googleapis.com/invoice/paid/amount" - // "appengine.google - // apis.com/http/server/response_latencies" - // - Type string `json:"type,omitempty"` - - // Unit: The unit in which the metric value is reported. It is only - // applicable if the value_type is INT64, DOUBLE, or DISTRIBUTION. The - // supported units are a subset of The Unified Code for Units of Measure - // (http://unitsofmeasure.org/ucum.html) standard:Basic units (UNIT) - // bit bit - // By byte - // s second - // min minute - // h hour - // d dayPrefixes (PREFIX) - // k kilo (10**3) - // M mega (10**6) - // G giga (10**9) - // T tera (10**12) - // P peta (10**15) - // E exa (10**18) - // Z zetta (10**21) - // Y yotta (10**24) - // m milli (10**-3) - // u micro (10**-6) - // n nano (10**-9) - // p pico (10**-12) - // f femto (10**-15) - // a atto (10**-18) - // z zepto (10**-21) - // y yocto (10**-24) - // Ki kibi (2**10) - // Mi mebi (2**20) - // Gi gibi (2**30) - // Ti tebi (2**40)GrammarThe grammar includes the dimensionless unit 1, - // such as 1/s.The grammar also includes these connectors: - // / division (as an infix operator, e.g. 1/s). - // . multiplication (as an infix operator, e.g. GBy.d)The grammar for a - // unit is as follows: - // Expression = Component { "." Component } { "/" Component } - // ; - // - // Component = [ PREFIX ] UNIT [ Annotation ] - // | Annotation - // | "1" - // ; - // - // Annotation = "{" NAME "}" ; - // Notes: - // Annotation is just a comment if it follows a UNIT and is equivalent - // to 1 if it is used alone. For examples, {requests}/s == 1/s, - // By{transmitted}/s == By/s. 
- // NAME is a sequence of non-blank printable ASCII characters not - // containing '{' or '}'. - Unit string `json:"unit,omitempty"` - - // ValueType: Whether the measurement is an integer, a floating-point - // number, etc. Some combinations of metric_kind and value_type might - // not be supported. - // - // Possible values: - // "VALUE_TYPE_UNSPECIFIED" - Do not use this default value. - // "BOOL" - The value is a boolean. This value type can be used only - // if the metric kind is GAUGE. - // "INT64" - The value is a signed 64-bit integer. - // "DOUBLE" - The value is a double precision floating point number. - // "STRING" - The value is a text string. This value type can be used - // only if the metric kind is GAUGE. - // "DISTRIBUTION" - The value is a Distribution. - // "MONEY" - The value is money. - ValueType string `json:"valueType,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *MetricDescriptor) MarshalJSON() ([]byte, error) { - type noMethod MetricDescriptor - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// MonitoredResource: An object representing a resource that can be used -// for monitoring, logging, billing, or other purposes. Examples include -// virtual machine instances, databases, and storage devices such as -// disks. The type field identifies a MonitoredResourceDescriptor object -// that describes the resource's schema. Information in the labels field -// identifies the actual resource and its attributes according to the -// schema. For example, a particular Compute Engine VM instance could be -// represented by the following object, because the -// MonitoredResourceDescriptor for "gce_instance" has labels -// "instance_id" and "zone": -// { "type": "gce_instance", -// "labels": { "instance_id": "12345678901234", -// "zone": "us-central1-a" }} -// -type MonitoredResource struct { - // Labels: Required. Values for all of the labels listed in the - // associated monitored resource descriptor. For example, Compute Engine - // VM instances use the labels "project_id", "instance_id", and "zone". - Labels map[string]string `json:"labels,omitempty"` - - // Type: Required. The monitored resource type. This field must match - // the type field of a MonitoredResourceDescriptor object. For example, - // the type of a Compute Engine VM instance is gce_instance. - Type string `json:"type,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Labels") to - // unconditionally include in API requests. 
By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Labels") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *MonitoredResource) MarshalJSON() ([]byte, error) { - type noMethod MonitoredResource - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// MonitoredResourceDescriptor: An object that describes the schema of a -// MonitoredResource object using a type name and a set of labels. For -// example, the monitored resource descriptor for Google Compute Engine -// VM instances has a type of "gce_instance" and specifies the use of -// the labels "instance_id" and "zone" to identify particular VM -// instances.Different APIs can support different monitored resource -// types. APIs generally provide a list method that returns the -// monitored resource descriptors used by the API. -type MonitoredResourceDescriptor struct { - // Description: Optional. A detailed description of the monitored - // resource type that might be used in documentation. - Description string `json:"description,omitempty"` - - // DisplayName: Optional. A concise name for the monitored resource type - // that might be displayed in user interfaces. It should be a Title - // Cased Noun Phrase, without any article or other determiners. For - // example, "Google Cloud SQL Database". - DisplayName string `json:"displayName,omitempty"` - - // Labels: Required. A set of labels used to describe instances of this - // monitored resource type. For example, an individual Google Cloud SQL - // database is identified by values for the labels "database_id" and - // "zone". - Labels []*LabelDescriptor `json:"labels,omitempty"` - - // Name: Optional. The resource name of the monitored resource - // descriptor: - // "projects/{project_id}/monitoredResourceDescriptors/{type}" where - // {type} is the value of the type field in this object and {project_id} - // is a project ID that provides API-specific context for accessing the - // type. APIs that do not use project information can use the resource - // name format "monitoredResourceDescriptors/{type}". - Name string `json:"name,omitempty"` - - // Type: Required. The monitored resource type. For example, the type - // "cloudsql_database" represents databases in Google Cloud SQL. The - // maximum length of this value is 256 characters. - Type string `json:"type,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) { - type noMethod MonitoredResourceDescriptor - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Option: A protocol buffer option, which can be attached to a message, -// field, enumeration, etc. -type Option struct { - // Name: The option's name. For protobuf built-in options (options - // defined in descriptor.proto), this is the short name. For example, - // "map_entry". For custom options, it should be the fully-qualified - // name. For example, "google.api.http". - Name string `json:"name,omitempty"` - - // Value: The option's value packed in an Any message. If the value is a - // primitive, the corresponding wrapper type defined in - // google/protobuf/wrappers.proto should be used. If the value is an - // enum, it should be stored as an int32 value using the - // google.protobuf.Int32Value type. - Value googleapi.RawMessage `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Name") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Name") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Option) MarshalJSON() ([]byte, error) { - type noMethod Option - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Point: A single data point in a time series. -type Point struct { - // Interval: The time interval to which the data point applies. For - // GAUGE metrics, only the end time of the interval is used. For DELTA - // metrics, the start and end time should specify a non-zero interval, - // with subsequent points specifying contiguous and non-overlapping - // intervals. For CUMULATIVE metrics, the start and end time should - // specify a non-zero interval, with subsequent points specifying the - // same start time and increasing end times, until an event resets the - // cumulative value to zero and sets a new start time for the following - // points. - Interval *TimeInterval `json:"interval,omitempty"` - - // Value: The value of the data point. - Value *TypedValue `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Interval") to - // unconditionally include in API requests. 
By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Interval") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Point) MarshalJSON() ([]byte, error) { - type noMethod Point - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Range: The range of the population values. -type Range struct { - // Max: The maximum of the population values. - Max float64 `json:"max,omitempty"` - - // Min: The minimum of the population values. - Min float64 `json:"min,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Max") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Max") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Range) MarshalJSON() ([]byte, error) { - type noMethod Range - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *Range) UnmarshalJSON(data []byte) error { - type noMethod Range - var s1 struct { - Max gensupport.JSONFloat64 `json:"max"` - Min gensupport.JSONFloat64 `json:"min"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.Max = float64(s1.Max) - s.Min = float64(s1.Min) - return nil -} - -// SourceContext: SourceContext represents information about the source -// of a protobuf element, like the file in which it is defined. -type SourceContext struct { - // FileName: The path-qualified name of the .proto file that contained - // the associated protobuf element. For example: - // "google/protobuf/source_context.proto". - FileName string `json:"fileName,omitempty"` - - // ForceSendFields is a list of field names (e.g. "FileName") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"FileName") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SourceContext) MarshalJSON() ([]byte, error) { - type noMethod SourceContext - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TimeInterval: A time interval extending just after a start time -// through an end time. If the start time is the same as the end time, -// then the interval represents a single point in time. -type TimeInterval struct { - // EndTime: Required. The end of the time interval. - EndTime string `json:"endTime,omitempty"` - - // StartTime: Optional. The beginning of the time interval. The default - // value for the start time is the end time. The start time must not be - // later than the end time. - StartTime string `json:"startTime,omitempty"` - - // ForceSendFields is a list of field names (e.g. "EndTime") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "EndTime") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TimeInterval) MarshalJSON() ([]byte, error) { - type noMethod TimeInterval - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TimeSeries: A collection of data points that describes the -// time-varying values of a metric. A time series is identified by a -// combination of a fully-specified monitored resource and a -// fully-specified metric. This type is used for both listing and -// creating time series. -type TimeSeries struct { - // Metric: The associated metric. A fully-specified metric used to - // identify the time series. - Metric *Metric `json:"metric,omitempty"` - - // MetricKind: The metric kind of the time series. When listing time - // series, this metric kind might be different from the metric kind of - // the associated metric if this time series is an alignment or - // reduction of other time series.When creating a time series, this - // field is optional. If present, it must be the same as the metric kind - // of the associated metric. If the associated metric's descriptor must - // be auto-created, then this field specifies the metric kind of the new - // descriptor and must be either GAUGE (the default) or CUMULATIVE. - // - // Possible values: - // "METRIC_KIND_UNSPECIFIED" - Do not use this default value. - // "GAUGE" - An instantaneous measurement of a value. - // "DELTA" - The change in a value during a time interval. - // "CUMULATIVE" - A value accumulated over a time interval. 
Cumulative - // measurements in a time series should have the same start time and - // increasing end times, until an event resets the cumulative value to - // zero and sets a new start time for the following points. - MetricKind string `json:"metricKind,omitempty"` - - // Points: The data points of this time series. When listing time - // series, the order of the points is specified by the list method.When - // creating a time series, this field must contain exactly one point and - // the point's type must be the same as the value type of the associated - // metric. If the associated metric's descriptor must be auto-created, - // then the value type of the descriptor is determined by the point's - // type, which must be BOOL, INT64, DOUBLE, or DISTRIBUTION. - Points []*Point `json:"points,omitempty"` - - // Resource: The associated monitored resource. Custom metrics can use - // only certain monitored resource types in their time series data. - Resource *MonitoredResource `json:"resource,omitempty"` - - // ValueType: The value type of the time series. When listing time - // series, this value type might be different from the value type of the - // associated metric if this time series is an alignment or reduction of - // other time series.When creating a time series, this field is - // optional. If present, it must be the same as the type of the data in - // the points field. - // - // Possible values: - // "VALUE_TYPE_UNSPECIFIED" - Do not use this default value. - // "BOOL" - The value is a boolean. This value type can be used only - // if the metric kind is GAUGE. - // "INT64" - The value is a signed 64-bit integer. - // "DOUBLE" - The value is a double precision floating point number. - // "STRING" - The value is a text string. This value type can be used - // only if the metric kind is GAUGE. - // "DISTRIBUTION" - The value is a Distribution. - // "MONEY" - The value is money. - ValueType string `json:"valueType,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Metric") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Metric") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TimeSeries) MarshalJSON() ([]byte, error) { - type noMethod TimeSeries - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Type: A protocol buffer message type. -type Type struct { - // Fields: The list of fields. - Fields []*Field `json:"fields,omitempty"` - - // Name: The fully qualified message name. - Name string `json:"name,omitempty"` - - // Oneofs: The list of types appearing in oneof definitions in this - // type. - Oneofs []string `json:"oneofs,omitempty"` - - // Options: The protocol buffer options. - Options []*Option `json:"options,omitempty"` - - // SourceContext: The source context. 
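An illustrative sketch of the constraint described above (not part of the vendored file; the literal label values and timestamp are made up): when writing data, each TimeSeries in a CreateTimeSeriesRequest fully specifies its metric and monitored resource and carries exactly one Point.

func exampleWriteRequest() *CreateTimeSeriesRequest {
	paid := int64(42)
	return &CreateTimeSeriesRequest{
		TimeSeries: []*TimeSeries{{
			Metric: &Metric{
				Type:   "custom.googleapis.com/invoice/paid/amount",
				Labels: map[string]string{"currency": "USD"},
			},
			Resource: &MonitoredResource{
				Type:   "gce_instance",
				Labels: map[string]string{"instance_id": "12345678901234", "zone": "us-central1-a"},
			},
			// Exactly one point per series; for GAUGE metrics only the end time is set.
			Points: []*Point{{
				Interval: &TimeInterval{EndTime: "2017-09-25T00:00:00Z"},
				Value:    &TypedValue{Int64Value: &paid},
			}},
		}},
	}
}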
- SourceContext *SourceContext `json:"sourceContext,omitempty"` - - // Syntax: The source syntax. - // - // Possible values: - // "SYNTAX_PROTO2" - Syntax proto2. - // "SYNTAX_PROTO3" - Syntax proto3. - Syntax string `json:"syntax,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Fields") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Fields") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Type) MarshalJSON() ([]byte, error) { - type noMethod Type - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TypedValue: A single strongly-typed value. -type TypedValue struct { - // BoolValue: A Boolean value: true or false. - BoolValue *bool `json:"boolValue,omitempty"` - - // DistributionValue: A distribution value. - DistributionValue *Distribution `json:"distributionValue,omitempty"` - - // DoubleValue: A 64-bit double-precision floating-point number. Its - // magnitude is approximately ±10±300 and it - // has 16 significant digits of precision. - DoubleValue *float64 `json:"doubleValue,omitempty"` - - // Int64Value: A 64-bit integer. Its range is approximately - // ±9.2x1018. - Int64Value *int64 `json:"int64Value,omitempty,string"` - - // StringValue: A variable-length string value. - StringValue *string `json:"stringValue,omitempty"` - - // ForceSendFields is a list of field names (e.g. "BoolValue") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "BoolValue") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *TypedValue) MarshalJSON() ([]byte, error) { - type noMethod TypedValue - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *TypedValue) UnmarshalJSON(data []byte) error { - type noMethod TypedValue - var s1 struct { - DoubleValue *gensupport.JSONFloat64 `json:"doubleValue"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - if s1.DoubleValue != nil { - s.DoubleValue = (*float64)(s1.DoubleValue) - } - return nil -} - -// method id "monitoring.projects.collectdTimeSeries.create": - -type ProjectsCollectdTimeSeriesCreateCall struct { - s *Service - name string - createcollectdtimeseriesrequest *CreateCollectdTimeSeriesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Stackdriver Monitoring Agent only: Creates a new time -// series. -func (r *ProjectsCollectdTimeSeriesService) Create(name string, createcollectdtimeseriesrequest *CreateCollectdTimeSeriesRequest) *ProjectsCollectdTimeSeriesCreateCall { - c := &ProjectsCollectdTimeSeriesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.createcollectdtimeseriesrequest = createcollectdtimeseriesrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsCollectdTimeSeriesCreateCall) Fields(s ...googleapi.Field) *ProjectsCollectdTimeSeriesCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsCollectdTimeSeriesCreateCall) Context(ctx context.Context) *ProjectsCollectdTimeSeriesCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsCollectdTimeSeriesCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsCollectdTimeSeriesCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.createcollectdtimeseriesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/collectdTimeSeries") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.collectdTimeSeries.create" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. 
-func (c *ProjectsCollectdTimeSeriesCreateCall) Do(opts ...googleapi.CallOption) (*Empty, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Empty{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Stackdriver Monitoring Agent only: Creates a new time series.\u003caside class=\"caution\"\u003eThis method is only for use by the Stackdriver Monitoring Agent. Use projects.timeSeries.create instead.\u003c/aside\u003e", - // "flatPath": "v3/projects/{projectsId}/collectdTimeSeries", - // "httpMethod": "POST", - // "id": "monitoring.projects.collectdTimeSeries.create", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "The project in which to create the time series. The format is \"projects/PROJECT_ID_OR_NUMBER\".", - // "location": "path", - // "pattern": "^projects/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v3/{+name}/collectdTimeSeries", - // "request": { - // "$ref": "CreateCollectdTimeSeriesRequest" - // }, - // "response": { - // "$ref": "Empty" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.write" - // ] - // } - -} - -// method id "monitoring.projects.groups.create": - -type ProjectsGroupsCreateCall struct { - s *Service - name string - group *Group - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Creates a new group. -func (r *ProjectsGroupsService) Create(name string, group *Group) *ProjectsGroupsCreateCall { - c := &ProjectsGroupsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.group = group - return c -} - -// ValidateOnly sets the optional parameter "validateOnly": If true, -// validate this request but do not create the group. -func (c *ProjectsGroupsCreateCall) ValidateOnly(validateOnly bool) *ProjectsGroupsCreateCall { - c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsGroupsCreateCall) Fields(s ...googleapi.Field) *ProjectsGroupsCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsGroupsCreateCall) Context(ctx context.Context) *ProjectsGroupsCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ProjectsGroupsCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsGroupsCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/groups") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.groups.create" call. -// Exactly one of *Group or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Group.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGroupsCreateCall) Do(opts ...googleapi.CallOption) (*Group, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Group{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a new group.", - // "flatPath": "v3/projects/{projectsId}/groups", - // "httpMethod": "POST", - // "id": "monitoring.projects.groups.create", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "The project in which to create the group. The format is \"projects/{project_id_or_number}\".", - // "location": "path", - // "pattern": "^projects/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "validateOnly": { - // "description": "If true, validate this request but do not create the group.", - // "location": "query", - // "type": "boolean" - // } - // }, - // "path": "v3/{+name}/groups", - // "request": { - // "$ref": "Group" - // }, - // "response": { - // "$ref": "Group" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" - // ] - // } - -} - -// method id "monitoring.projects.groups.delete": - -type ProjectsGroupsDeleteCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes an existing group. -func (r *ProjectsGroupsService) Delete(name string) *ProjectsGroupsDeleteCall { - c := &ProjectsGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsGroupsDeleteCall) Fields(s ...googleapi.Field) *ProjectsGroupsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsGroupsDeleteCall) Context(ctx context.Context) *ProjectsGroupsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsGroupsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.groups.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Empty{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes an existing group.", - // "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", - // "httpMethod": "DELETE", - // "id": "monitoring.projects.groups.delete", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "The group to delete. 
The format is \"projects/{project_id_or_number}/groups/{group_id}\".", - // "location": "path", - // "pattern": "^projects/[^/]+/groups/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v3/{+name}", - // "response": { - // "$ref": "Empty" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" - // ] - // } - -} - -// method id "monitoring.projects.groups.get": - -type ProjectsGroupsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Gets a single group. -func (r *ProjectsGroupsService) Get(name string) *ProjectsGroupsGetCall { - c := &ProjectsGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsGroupsGetCall) Fields(s ...googleapi.Field) *ProjectsGroupsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsGroupsGetCall) IfNoneMatch(entityTag string) *ProjectsGroupsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsGroupsGetCall) Context(ctx context.Context) *ProjectsGroupsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsGroupsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsGroupsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.groups.get" call. -// Exactly one of *Group or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Group.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGroupsGetCall) Do(opts ...googleapi.CallOption) (*Group, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Group{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets a single group.", - // "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", - // "httpMethod": "GET", - // "id": "monitoring.projects.groups.get", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "The group to retrieve. The format is \"projects/{project_id_or_number}/groups/{group_id}\".", - // "location": "path", - // "pattern": "^projects/[^/]+/groups/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v3/{+name}", - // "response": { - // "$ref": "Group" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read" - // ] - // } - -} - -// method id "monitoring.projects.groups.list": - -type ProjectsGroupsListCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists the existing groups. -func (r *ProjectsGroupsService) List(name string) *ProjectsGroupsListCall { - c := &ProjectsGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// AncestorsOfGroup sets the optional parameter "ancestorsOfGroup": A -// group name: "projects/{project_id_or_number}/groups/{group_id}". -// Returns groups that are ancestors of the specified group. The groups -// are returned in order, starting with the immediate parent and ending -// with the most distant ancestor. If the specified group has no -// immediate parent, the results are empty. -func (c *ProjectsGroupsListCall) AncestorsOfGroup(ancestorsOfGroup string) *ProjectsGroupsListCall { - c.urlParams_.Set("ancestorsOfGroup", ancestorsOfGroup) - return c -} - -// ChildrenOfGroup sets the optional parameter "childrenOfGroup": A -// group name: "projects/{project_id_or_number}/groups/{group_id}". -// Returns groups whose parentName field contains the group name. If no -// groups have this parent, the results are empty. -func (c *ProjectsGroupsListCall) ChildrenOfGroup(childrenOfGroup string) *ProjectsGroupsListCall { - c.urlParams_.Set("childrenOfGroup", childrenOfGroup) - return c -} - -// DescendantsOfGroup sets the optional parameter "descendantsOfGroup": -// A group name: "projects/{project_id_or_number}/groups/{group_id}". -// Returns the descendants of the specified group. This is a superset of -// the results returned by the childrenOfGroup filter, and includes -// children-of-children, and so forth. -func (c *ProjectsGroupsListCall) DescendantsOfGroup(descendantsOfGroup string) *ProjectsGroupsListCall { - c.urlParams_.Set("descendantsOfGroup", descendantsOfGroup) - return c -} - -// PageSize sets the optional parameter "pageSize": A positive number -// that is the maximum number of results to return. 
-func (c *ProjectsGroupsListCall) PageSize(pageSize int64) *ProjectsGroupsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If this field is -// not empty then it must contain the nextPageToken value returned by a -// previous call to this method. Using this field causes the method to -// return additional results from the previous method call. -func (c *ProjectsGroupsListCall) PageToken(pageToken string) *ProjectsGroupsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsGroupsListCall) Fields(s ...googleapi.Field) *ProjectsGroupsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsGroupsListCall) IfNoneMatch(entityTag string) *ProjectsGroupsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsGroupsListCall) Context(ctx context.Context) *ProjectsGroupsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsGroupsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsGroupsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/groups") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.groups.list" call. -// Exactly one of *ListGroupsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListGroupsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsGroupsListCall) Do(opts ...googleapi.CallOption) (*ListGroupsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListGroupsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists the existing groups.", - // "flatPath": "v3/projects/{projectsId}/groups", - // "httpMethod": "GET", - // "id": "monitoring.projects.groups.list", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "ancestorsOfGroup": { - // "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns groups that are ancestors of the specified group. The groups are returned in order, starting with the immediate parent and ending with the most distant ancestor. If the specified group has no immediate parent, the results are empty.", - // "location": "query", - // "type": "string" - // }, - // "childrenOfGroup": { - // "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns groups whose parentName field contains the group name. If no groups have this parent, the results are empty.", - // "location": "query", - // "type": "string" - // }, - // "descendantsOfGroup": { - // "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns the descendants of the specified group. This is a superset of the results returned by the childrenOfGroup filter, and includes children-of-children, and so forth.", - // "location": "query", - // "type": "string" - // }, - // "name": { - // "description": "The project whose groups are to be listed. The format is \"projects/{project_id_or_number}\".", - // "location": "path", - // "pattern": "^projects/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "pageSize": { - // "description": "A positive number that is the maximum number of results to return.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "v3/{+name}/groups", - // "response": { - // "$ref": "ListGroupsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *ProjectsGroupsListCall) Pages(ctx context.Context, f func(*ListGroupsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "monitoring.projects.groups.update": - -type ProjectsGroupsUpdateCall struct { - s *Service - name string - group *Group - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates an existing group. You can change any group -// attributes except name. -func (r *ProjectsGroupsService) Update(name string, group *Group) *ProjectsGroupsUpdateCall { - c := &ProjectsGroupsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.group = group - return c -} - -// ValidateOnly sets the optional parameter "validateOnly": If true, -// validate this request but do not update the existing group. -func (c *ProjectsGroupsUpdateCall) ValidateOnly(validateOnly bool) *ProjectsGroupsUpdateCall { - c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsGroupsUpdateCall) Fields(s ...googleapi.Field) *ProjectsGroupsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsGroupsUpdateCall) Context(ctx context.Context) *ProjectsGroupsUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsGroupsUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsGroupsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.groups.update" call. -// Exactly one of *Group or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Group.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGroupsUpdateCall) Do(opts ...googleapi.CallOption) (*Group, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Group{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an existing group. You can change any group attributes except name.", - // "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", - // "httpMethod": "PUT", - // "id": "monitoring.projects.groups.update", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Output only. The name of this group. The format is \"projects/{project_id_or_number}/groups/{group_id}\". When creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique {group_id} that is generated automatically.", - // "location": "path", - // "pattern": "^projects/[^/]+/groups/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "validateOnly": { - // "description": "If true, validate this request but do not update the existing group.", - // "location": "query", - // "type": "boolean" - // } - // }, - // "path": "v3/{+name}", - // "request": { - // "$ref": "Group" - // }, - // "response": { - // "$ref": "Group" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" - // ] - // } - -} - -// method id "monitoring.projects.groups.members.list": - -type ProjectsGroupsMembersListCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists the monitored resources that are members of a group. -func (r *ProjectsGroupsMembersService) List(name string) *ProjectsGroupsMembersListCall { - c := &ProjectsGroupsMembersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Filter sets the optional parameter "filter": An optional list filter -// describing the members to be returned. The filter may reference the -// type, labels, and metadata of monitored resources that comprise the -// group. For example, to return only resources representing Compute -// Engine VM instances, use this filter: -// resource.type = "gce_instance" -func (c *ProjectsGroupsMembersListCall) Filter(filter string) *ProjectsGroupsMembersListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// IntervalEndTime sets the optional parameter "interval.endTime": -// Required. The end of the time interval. -func (c *ProjectsGroupsMembersListCall) IntervalEndTime(intervalEndTime string) *ProjectsGroupsMembersListCall { - c.urlParams_.Set("interval.endTime", intervalEndTime) - return c -} - -// IntervalStartTime sets the optional parameter "interval.startTime": -// The beginning of the time interval. The default value for the start -// time is the end time. The start time must not be later than the end -// time. 
-func (c *ProjectsGroupsMembersListCall) IntervalStartTime(intervalStartTime string) *ProjectsGroupsMembersListCall { - c.urlParams_.Set("interval.startTime", intervalStartTime) - return c -} - -// PageSize sets the optional parameter "pageSize": A positive number -// that is the maximum number of results to return. -func (c *ProjectsGroupsMembersListCall) PageSize(pageSize int64) *ProjectsGroupsMembersListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If this field is -// not empty then it must contain the nextPageToken value returned by a -// previous call to this method. Using this field causes the method to -// return additional results from the previous method call. -func (c *ProjectsGroupsMembersListCall) PageToken(pageToken string) *ProjectsGroupsMembersListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsGroupsMembersListCall) Fields(s ...googleapi.Field) *ProjectsGroupsMembersListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsGroupsMembersListCall) IfNoneMatch(entityTag string) *ProjectsGroupsMembersListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsGroupsMembersListCall) Context(ctx context.Context) *ProjectsGroupsMembersListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsGroupsMembersListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsGroupsMembersListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/members") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.groups.members.list" call. -// Exactly one of *ListGroupMembersResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *ListGroupMembersResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *ProjectsGroupsMembersListCall) Do(opts ...googleapi.CallOption) (*ListGroupMembersResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListGroupMembersResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists the monitored resources that are members of a group.", - // "flatPath": "v3/projects/{projectsId}/groups/{groupsId}/members", - // "httpMethod": "GET", - // "id": "monitoring.projects.groups.members.list", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "filter": { - // "description": "An optional list filter describing the members to be returned. The filter may reference the type, labels, and metadata of monitored resources that comprise the group. For example, to return only resources representing Compute Engine VM instances, use this filter:\nresource.type = \"gce_instance\"\n", - // "location": "query", - // "type": "string" - // }, - // "interval.endTime": { - // "description": "Required. The end of the time interval.", - // "format": "google-datetime", - // "location": "query", - // "type": "string" - // }, - // "interval.startTime": { - // "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", - // "format": "google-datetime", - // "location": "query", - // "type": "string" - // }, - // "name": { - // "description": "The group whose members are listed. The format is \"projects/{project_id_or_number}/groups/{group_id}\".", - // "location": "path", - // "pattern": "^projects/[^/]+/groups/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "pageSize": { - // "description": "A positive number that is the maximum number of results to return.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "v3/{+name}/members", - // "response": { - // "$ref": "ListGroupMembersResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *ProjectsGroupsMembersListCall) Pages(ctx context.Context, f func(*ListGroupMembersResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "monitoring.projects.metricDescriptors.create": - -type ProjectsMetricDescriptorsCreateCall struct { - s *Service - name string - metricdescriptor *MetricDescriptor - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Creates a new metric descriptor. User-created metric -// descriptors define custom metrics. -func (r *ProjectsMetricDescriptorsService) Create(name string, metricdescriptor *MetricDescriptor) *ProjectsMetricDescriptorsCreateCall { - c := &ProjectsMetricDescriptorsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.metricdescriptor = metricdescriptor - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsMetricDescriptorsCreateCall) Fields(s ...googleapi.Field) *ProjectsMetricDescriptorsCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsMetricDescriptorsCreateCall) Context(ctx context.Context) *ProjectsMetricDescriptorsCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsMetricDescriptorsCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsMetricDescriptorsCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metricdescriptor) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/metricDescriptors") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.metricDescriptors.create" call. -// Exactly one of *MetricDescriptor or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *MetricDescriptor.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsMetricDescriptorsCreateCall) Do(opts ...googleapi.CallOption) (*MetricDescriptor, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &MetricDescriptor{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a new metric descriptor. User-created metric descriptors define custom metrics.", - // "flatPath": "v3/projects/{projectsId}/metricDescriptors", - // "httpMethod": "POST", - // "id": "monitoring.projects.metricDescriptors.create", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", - // "location": "path", - // "pattern": "^projects/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v3/{+name}/metricDescriptors", - // "request": { - // "$ref": "MetricDescriptor" - // }, - // "response": { - // "$ref": "MetricDescriptor" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.write" - // ] - // } - -} - -// method id "monitoring.projects.metricDescriptors.delete": - -type ProjectsMetricDescriptorsDeleteCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes a metric descriptor. Only user-created custom metrics -// can be deleted. -func (r *ProjectsMetricDescriptorsService) Delete(name string) *ProjectsMetricDescriptorsDeleteCall { - c := &ProjectsMetricDescriptorsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsMetricDescriptorsDeleteCall) Fields(s ...googleapi.Field) *ProjectsMetricDescriptorsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsMetricDescriptorsDeleteCall) Context(ctx context.Context) *ProjectsMetricDescriptorsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsMetricDescriptorsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsMetricDescriptorsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.metricDescriptors.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsMetricDescriptorsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Empty{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes a metric descriptor. Only user-created custom metrics can be deleted.", - // "flatPath": "v3/projects/{projectsId}/metricDescriptors/{metricDescriptorsId}", - // "httpMethod": "DELETE", - // "id": "monitoring.projects.metricDescriptors.delete", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "The metric descriptor on which to execute the request. The format is \"projects/{project_id_or_number}/metricDescriptors/{metric_id}\". An example of {metric_id} is: \"custom.googleapis.com/my_test_metric\".", - // "location": "path", - // "pattern": "^projects/[^/]+/metricDescriptors/.+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v3/{+name}", - // "response": { - // "$ref": "Empty" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" - // ] - // } - -} - -// method id "monitoring.projects.metricDescriptors.get": - -type ProjectsMetricDescriptorsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Gets a single metric descriptor. This method does not require a -// Stackdriver account. -func (r *ProjectsMetricDescriptorsService) Get(name string) *ProjectsMetricDescriptorsGetCall { - c := &ProjectsMetricDescriptorsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsMetricDescriptorsGetCall) Fields(s ...googleapi.Field) *ProjectsMetricDescriptorsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsMetricDescriptorsGetCall) IfNoneMatch(entityTag string) *ProjectsMetricDescriptorsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsMetricDescriptorsGetCall) Context(ctx context.Context) *ProjectsMetricDescriptorsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsMetricDescriptorsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsMetricDescriptorsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.metricDescriptors.get" call. -// Exactly one of *MetricDescriptor or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *MetricDescriptor.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsMetricDescriptorsGetCall) Do(opts ...googleapi.CallOption) (*MetricDescriptor, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &MetricDescriptor{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets a single metric descriptor. This method does not require a Stackdriver account.", - // "flatPath": "v3/projects/{projectsId}/metricDescriptors/{metricDescriptorsId}", - // "httpMethod": "GET", - // "id": "monitoring.projects.metricDescriptors.get", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "The metric descriptor on which to execute the request. The format is \"projects/{project_id_or_number}/metricDescriptors/{metric_id}\". 
An example value of {metric_id} is \"compute.googleapis.com/instance/disk/read_bytes_count\".", - // "location": "path", - // "pattern": "^projects/[^/]+/metricDescriptors/.+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v3/{+name}", - // "response": { - // "$ref": "MetricDescriptor" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read", - // "https://www.googleapis.com/auth/monitoring.write" - // ] - // } - -} - -// method id "monitoring.projects.metricDescriptors.list": - -type ProjectsMetricDescriptorsListCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists metric descriptors that match a filter. This method does -// not require a Stackdriver account. -func (r *ProjectsMetricDescriptorsService) List(name string) *ProjectsMetricDescriptorsListCall { - c := &ProjectsMetricDescriptorsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Filter sets the optional parameter "filter": If this field is empty, -// all custom and system-defined metric descriptors are returned. -// Otherwise, the filter specifies which metric descriptors are to be -// returned. For example, the following filter matches all custom -// metrics: -// metric.type = starts_with("custom.googleapis.com/") -func (c *ProjectsMetricDescriptorsListCall) Filter(filter string) *ProjectsMetricDescriptorsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// PageSize sets the optional parameter "pageSize": A positive number -// that is the maximum number of results to return. -func (c *ProjectsMetricDescriptorsListCall) PageSize(pageSize int64) *ProjectsMetricDescriptorsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If this field is -// not empty then it must contain the nextPageToken value returned by a -// previous call to this method. Using this field causes the method to -// return additional results from the previous method call. -func (c *ProjectsMetricDescriptorsListCall) PageToken(pageToken string) *ProjectsMetricDescriptorsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsMetricDescriptorsListCall) Fields(s ...googleapi.Field) *ProjectsMetricDescriptorsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsMetricDescriptorsListCall) IfNoneMatch(entityTag string) *ProjectsMetricDescriptorsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *ProjectsMetricDescriptorsListCall) Context(ctx context.Context) *ProjectsMetricDescriptorsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsMetricDescriptorsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsMetricDescriptorsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/metricDescriptors") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.metricDescriptors.list" call. -// Exactly one of *ListMetricDescriptorsResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ListMetricDescriptorsResponse.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsMetricDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListMetricDescriptorsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListMetricDescriptorsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists metric descriptors that match a filter. This method does not require a Stackdriver account.", - // "flatPath": "v3/projects/{projectsId}/metricDescriptors", - // "httpMethod": "GET", - // "id": "monitoring.projects.metricDescriptors.list", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "filter": { - // "description": "If this field is empty, all custom and system-defined metric descriptors are returned. Otherwise, the filter specifies which metric descriptors are to be returned. For example, the following filter matches all custom metrics:\nmetric.type = starts_with(\"custom.googleapis.com/\")\n", - // "location": "query", - // "type": "string" - // }, - // "name": { - // "description": "The project on which to execute the request. 
The format is \"projects/{project_id_or_number}\".", - // "location": "path", - // "pattern": "^projects/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "pageSize": { - // "description": "A positive number that is the maximum number of results to return.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "v3/{+name}/metricDescriptors", - // "response": { - // "$ref": "ListMetricDescriptorsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read", - // "https://www.googleapis.com/auth/monitoring.write" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsMetricDescriptorsListCall) Pages(ctx context.Context, f func(*ListMetricDescriptorsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "monitoring.projects.monitoredResourceDescriptors.get": - -type ProjectsMonitoredResourceDescriptorsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Gets a single monitored resource descriptor. This method does -// not require a Stackdriver account. -func (r *ProjectsMonitoredResourceDescriptorsService) Get(name string) *ProjectsMonitoredResourceDescriptorsGetCall { - c := &ProjectsMonitoredResourceDescriptorsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsMonitoredResourceDescriptorsGetCall) Fields(s ...googleapi.Field) *ProjectsMonitoredResourceDescriptorsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsMonitoredResourceDescriptorsGetCall) IfNoneMatch(entityTag string) *ProjectsMonitoredResourceDescriptorsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *ProjectsMonitoredResourceDescriptorsGetCall) Context(ctx context.Context) *ProjectsMonitoredResourceDescriptorsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsMonitoredResourceDescriptorsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsMonitoredResourceDescriptorsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.monitoredResourceDescriptors.get" call. -// Exactly one of *MonitoredResourceDescriptor or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *MonitoredResourceDescriptor.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsMonitoredResourceDescriptorsGetCall) Do(opts ...googleapi.CallOption) (*MonitoredResourceDescriptor, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &MonitoredResourceDescriptor{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets a single monitored resource descriptor. This method does not require a Stackdriver account.", - // "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors/{monitoredResourceDescriptorsId}", - // "httpMethod": "GET", - // "id": "monitoring.projects.monitoredResourceDescriptors.get", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "The monitored resource descriptor to get. The format is \"projects/{project_id_or_number}/monitoredResourceDescriptors/{resource_type}\". 
The {resource_type} is a predefined type, such as cloudsql_database.", - // "location": "path", - // "pattern": "^projects/[^/]+/monitoredResourceDescriptors/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v3/{+name}", - // "response": { - // "$ref": "MonitoredResourceDescriptor" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read", - // "https://www.googleapis.com/auth/monitoring.write" - // ] - // } - -} - -// method id "monitoring.projects.monitoredResourceDescriptors.list": - -type ProjectsMonitoredResourceDescriptorsListCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists monitored resource descriptors that match a filter. This -// method does not require a Stackdriver account. -func (r *ProjectsMonitoredResourceDescriptorsService) List(name string) *ProjectsMonitoredResourceDescriptorsListCall { - c := &ProjectsMonitoredResourceDescriptorsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Filter sets the optional parameter "filter": An optional filter -// describing the descriptors to be returned. The filter can reference -// the descriptor's type and labels. For example, the following filter -// returns only Google Compute Engine descriptors that have an id -// label: -// resource.type = starts_with("gce_") AND resource.label:id -func (c *ProjectsMonitoredResourceDescriptorsListCall) Filter(filter string) *ProjectsMonitoredResourceDescriptorsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// PageSize sets the optional parameter "pageSize": A positive number -// that is the maximum number of results to return. -func (c *ProjectsMonitoredResourceDescriptorsListCall) PageSize(pageSize int64) *ProjectsMonitoredResourceDescriptorsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If this field is -// not empty then it must contain the nextPageToken value returned by a -// previous call to this method. Using this field causes the method to -// return additional results from the previous method call. -func (c *ProjectsMonitoredResourceDescriptorsListCall) PageToken(pageToken string) *ProjectsMonitoredResourceDescriptorsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsMonitoredResourceDescriptorsListCall) Fields(s ...googleapi.Field) *ProjectsMonitoredResourceDescriptorsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsMonitoredResourceDescriptorsListCall) IfNoneMatch(entityTag string) *ProjectsMonitoredResourceDescriptorsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. 
Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsMonitoredResourceDescriptorsListCall) Context(ctx context.Context) *ProjectsMonitoredResourceDescriptorsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsMonitoredResourceDescriptorsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsMonitoredResourceDescriptorsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/monitoredResourceDescriptors") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.monitoredResourceDescriptors.list" call. -// Exactly one of *ListMonitoredResourceDescriptorsResponse or error -// will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *ListMonitoredResourceDescriptorsResponse.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *ProjectsMonitoredResourceDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListMonitoredResourceDescriptorsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account.", - // "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors", - // "httpMethod": "GET", - // "id": "monitoring.projects.monitoredResourceDescriptors.list", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "filter": { - // "description": "An optional filter describing the descriptors to be returned. The filter can reference the descriptor's type and labels. For example, the following filter returns only Google Compute Engine descriptors that have an id label:\nresource.type = starts_with(\"gce_\") AND resource.label:id\n", - // "location": "query", - // "type": "string" - // }, - // "name": { - // "description": "The project on which to execute the request. 
The format is \"projects/{project_id_or_number}\".", - // "location": "path", - // "pattern": "^projects/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "pageSize": { - // "description": "A positive number that is the maximum number of results to return.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "v3/{+name}/monitoredResourceDescriptors", - // "response": { - // "$ref": "ListMonitoredResourceDescriptorsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read", - // "https://www.googleapis.com/auth/monitoring.write" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsMonitoredResourceDescriptorsListCall) Pages(ctx context.Context, f func(*ListMonitoredResourceDescriptorsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "monitoring.projects.timeSeries.create": - -type ProjectsTimeSeriesCreateCall struct { - s *Service - name string - createtimeseriesrequest *CreateTimeSeriesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Creates or adds data to one or more time series. The response -// is empty if all time series in the request were written. If any time -// series could not be written, a corresponding failure message is -// included in the error response. -func (r *ProjectsTimeSeriesService) Create(name string, createtimeseriesrequest *CreateTimeSeriesRequest) *ProjectsTimeSeriesCreateCall { - c := &ProjectsTimeSeriesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.createtimeseriesrequest = createtimeseriesrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsTimeSeriesCreateCall) Fields(s ...googleapi.Field) *ProjectsTimeSeriesCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsTimeSeriesCreateCall) Context(ctx context.Context) *ProjectsTimeSeriesCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ProjectsTimeSeriesCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsTimeSeriesCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.createtimeseriesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/timeSeries") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.timeSeries.create" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsTimeSeriesCreateCall) Do(opts ...googleapi.CallOption) (*Empty, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Empty{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates or adds data to one or more time series. The response is empty if all time series in the request were written. If any time series could not be written, a corresponding failure message is included in the error response.", - // "flatPath": "v3/projects/{projectsId}/timeSeries", - // "httpMethod": "POST", - // "id": "monitoring.projects.timeSeries.create", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".", - // "location": "path", - // "pattern": "^projects/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v3/{+name}/timeSeries", - // "request": { - // "$ref": "CreateTimeSeriesRequest" - // }, - // "response": { - // "$ref": "Empty" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.write" - // ] - // } - -} - -// method id "monitoring.projects.timeSeries.list": - -type ProjectsTimeSeriesListCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists time series that match a filter. This method does not -// require a Stackdriver account. 
-func (r *ProjectsTimeSeriesService) List(name string) *ProjectsTimeSeriesListCall { - c := &ProjectsTimeSeriesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// AggregationAlignmentPeriod sets the optional parameter -// "aggregation.alignmentPeriod": The alignment period for per-time -// series alignment. If present, alignmentPeriod must be at least 60 -// seconds. After per-time series alignment, each time series will -// contain data points only on the period boundaries. If -// perSeriesAligner is not specified or equals ALIGN_NONE, then this -// field is ignored. If perSeriesAligner is specified and does not equal -// ALIGN_NONE, then this field must be defined; otherwise an error is -// returned. -func (c *ProjectsTimeSeriesListCall) AggregationAlignmentPeriod(aggregationAlignmentPeriod string) *ProjectsTimeSeriesListCall { - c.urlParams_.Set("aggregation.alignmentPeriod", aggregationAlignmentPeriod) - return c -} - -// AggregationCrossSeriesReducer sets the optional parameter -// "aggregation.crossSeriesReducer": The approach to be used to combine -// time series. Not all reducer functions may be applied to all time -// series, depending on the metric type and the value type of the -// original time series. Reduction may change the metric type of value -// type of the time series.Time series data must be aligned in order to -// perform cross-time series reduction. If crossSeriesReducer is -// specified, then perSeriesAligner must be specified and not equal -// ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error -// is returned. -// -// Possible values: -// "REDUCE_NONE" -// "REDUCE_MEAN" -// "REDUCE_MIN" -// "REDUCE_MAX" -// "REDUCE_SUM" -// "REDUCE_STDDEV" -// "REDUCE_COUNT" -// "REDUCE_COUNT_TRUE" -// "REDUCE_FRACTION_TRUE" -// "REDUCE_PERCENTILE_99" -// "REDUCE_PERCENTILE_95" -// "REDUCE_PERCENTILE_50" -// "REDUCE_PERCENTILE_05" -func (c *ProjectsTimeSeriesListCall) AggregationCrossSeriesReducer(aggregationCrossSeriesReducer string) *ProjectsTimeSeriesListCall { - c.urlParams_.Set("aggregation.crossSeriesReducer", aggregationCrossSeriesReducer) - return c -} - -// AggregationGroupByFields sets the optional parameter -// "aggregation.groupByFields": The set of fields to preserve when -// crossSeriesReducer is specified. The groupByFields determine how the -// time series are partitioned into subsets prior to applying the -// aggregation function. Each subset contains time series that have the -// same value for each of the grouping fields. Each individual time -// series is a member of exactly one subset. The crossSeriesReducer is -// applied to each subset of time series. It is not possible to reduce -// across different resource types, so this field implicitly contains -// resource.type. Fields not specified in groupByFields are aggregated -// away. If groupByFields is not specified and all the time series have -// the same resource type, then the time series are aggregated into a -// single output time series. If crossSeriesReducer is not defined, this -// field is ignored. -func (c *ProjectsTimeSeriesListCall) AggregationGroupByFields(aggregationGroupByFields ...string) *ProjectsTimeSeriesListCall { - c.urlParams_.SetMulti("aggregation.groupByFields", append([]string{}, aggregationGroupByFields...)) - return c -} - -// AggregationPerSeriesAligner sets the optional parameter -// "aggregation.perSeriesAligner": The approach to be used to align -// individual time series. 
Not all alignment functions may be applied to -// all time series, depending on the metric type and value type of the -// original time series. Alignment may change the metric type or the -// value type of the time series.Time series data must be aligned in -// order to perform cross-time series reduction. If crossSeriesReducer -// is specified, then perSeriesAligner must be specified and not equal -// ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error -// is returned. -// -// Possible values: -// "ALIGN_NONE" -// "ALIGN_DELTA" -// "ALIGN_RATE" -// "ALIGN_INTERPOLATE" -// "ALIGN_NEXT_OLDER" -// "ALIGN_MIN" -// "ALIGN_MAX" -// "ALIGN_MEAN" -// "ALIGN_COUNT" -// "ALIGN_SUM" -// "ALIGN_STDDEV" -// "ALIGN_COUNT_TRUE" -// "ALIGN_FRACTION_TRUE" -// "ALIGN_PERCENTILE_99" -// "ALIGN_PERCENTILE_95" -// "ALIGN_PERCENTILE_50" -// "ALIGN_PERCENTILE_05" -func (c *ProjectsTimeSeriesListCall) AggregationPerSeriesAligner(aggregationPerSeriesAligner string) *ProjectsTimeSeriesListCall { - c.urlParams_.Set("aggregation.perSeriesAligner", aggregationPerSeriesAligner) - return c -} - -// Filter sets the optional parameter "filter": A monitoring filter that -// specifies which time series should be returned. The filter must -// specify a single metric type, and can additionally specify metric -// labels and other information. For example: -// metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND -// metric.label.instance_name = "my-instance-name" -func (c *ProjectsTimeSeriesListCall) Filter(filter string) *ProjectsTimeSeriesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// IntervalEndTime sets the optional parameter "interval.endTime": -// Required. The end of the time interval. -func (c *ProjectsTimeSeriesListCall) IntervalEndTime(intervalEndTime string) *ProjectsTimeSeriesListCall { - c.urlParams_.Set("interval.endTime", intervalEndTime) - return c -} - -// IntervalStartTime sets the optional parameter "interval.startTime": -// The beginning of the time interval. The default value for the start -// time is the end time. The start time must not be later than the end -// time. -func (c *ProjectsTimeSeriesListCall) IntervalStartTime(intervalStartTime string) *ProjectsTimeSeriesListCall { - c.urlParams_.Set("interval.startTime", intervalStartTime) - return c -} - -// OrderBy sets the optional parameter "orderBy": Specifies the order in -// which the points of the time series should be returned. By default, -// results are not ordered. Currently, this field must be left blank. -func (c *ProjectsTimeSeriesListCall) OrderBy(orderBy string) *ProjectsTimeSeriesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageSize sets the optional parameter "pageSize": A positive number -// that is the maximum number of results to return. When view field sets -// to FULL, it limits the number of Points server will return; if view -// field is HEADERS, it limits the number of TimeSeries server will -// return. -func (c *ProjectsTimeSeriesListCall) PageSize(pageSize int64) *ProjectsTimeSeriesListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If this field is -// not empty then it must contain the nextPageToken value returned by a -// previous call to this method. Using this field causes the method to -// return additional results from the previous method call. 
-func (c *ProjectsTimeSeriesListCall) PageToken(pageToken string) *ProjectsTimeSeriesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// SecondaryAggregationAlignmentPeriod sets the optional parameter -// "secondaryAggregation.alignmentPeriod": The alignment period for -// per-time series alignment. If present, alignmentPeriod must be at -// least 60 seconds. After per-time series alignment, each time series -// will contain data points only on the period boundaries. If -// perSeriesAligner is not specified or equals ALIGN_NONE, then this -// field is ignored. If perSeriesAligner is specified and does not equal -// ALIGN_NONE, then this field must be defined; otherwise an error is -// returned. -func (c *ProjectsTimeSeriesListCall) SecondaryAggregationAlignmentPeriod(secondaryAggregationAlignmentPeriod string) *ProjectsTimeSeriesListCall { - c.urlParams_.Set("secondaryAggregation.alignmentPeriod", secondaryAggregationAlignmentPeriod) - return c -} - -// SecondaryAggregationCrossSeriesReducer sets the optional parameter -// "secondaryAggregation.crossSeriesReducer": The approach to be used to -// combine time series. Not all reducer functions may be applied to all -// time series, depending on the metric type and the value type of the -// original time series. Reduction may change the metric type of value -// type of the time series.Time series data must be aligned in order to -// perform cross-time series reduction. If crossSeriesReducer is -// specified, then perSeriesAligner must be specified and not equal -// ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error -// is returned. -// -// Possible values: -// "REDUCE_NONE" -// "REDUCE_MEAN" -// "REDUCE_MIN" -// "REDUCE_MAX" -// "REDUCE_SUM" -// "REDUCE_STDDEV" -// "REDUCE_COUNT" -// "REDUCE_COUNT_TRUE" -// "REDUCE_FRACTION_TRUE" -// "REDUCE_PERCENTILE_99" -// "REDUCE_PERCENTILE_95" -// "REDUCE_PERCENTILE_50" -// "REDUCE_PERCENTILE_05" -func (c *ProjectsTimeSeriesListCall) SecondaryAggregationCrossSeriesReducer(secondaryAggregationCrossSeriesReducer string) *ProjectsTimeSeriesListCall { - c.urlParams_.Set("secondaryAggregation.crossSeriesReducer", secondaryAggregationCrossSeriesReducer) - return c -} - -// SecondaryAggregationGroupByFields sets the optional parameter -// "secondaryAggregation.groupByFields": The set of fields to preserve -// when crossSeriesReducer is specified. The groupByFields determine how -// the time series are partitioned into subsets prior to applying the -// aggregation function. Each subset contains time series that have the -// same value for each of the grouping fields. Each individual time -// series is a member of exactly one subset. The crossSeriesReducer is -// applied to each subset of time series. It is not possible to reduce -// across different resource types, so this field implicitly contains -// resource.type. Fields not specified in groupByFields are aggregated -// away. If groupByFields is not specified and all the time series have -// the same resource type, then the time series are aggregated into a -// single output time series. If crossSeriesReducer is not defined, this -// field is ignored. 
-func (c *ProjectsTimeSeriesListCall) SecondaryAggregationGroupByFields(secondaryAggregationGroupByFields ...string) *ProjectsTimeSeriesListCall { - c.urlParams_.SetMulti("secondaryAggregation.groupByFields", append([]string{}, secondaryAggregationGroupByFields...)) - return c -} - -// SecondaryAggregationPerSeriesAligner sets the optional parameter -// "secondaryAggregation.perSeriesAligner": The approach to be used to -// align individual time series. Not all alignment functions may be -// applied to all time series, depending on the metric type and value -// type of the original time series. Alignment may change the metric -// type or the value type of the time series.Time series data must be -// aligned in order to perform cross-time series reduction. If -// crossSeriesReducer is specified, then perSeriesAligner must be -// specified and not equal ALIGN_NONE and alignmentPeriod must be -// specified; otherwise, an error is returned. -// -// Possible values: -// "ALIGN_NONE" -// "ALIGN_DELTA" -// "ALIGN_RATE" -// "ALIGN_INTERPOLATE" -// "ALIGN_NEXT_OLDER" -// "ALIGN_MIN" -// "ALIGN_MAX" -// "ALIGN_MEAN" -// "ALIGN_COUNT" -// "ALIGN_SUM" -// "ALIGN_STDDEV" -// "ALIGN_COUNT_TRUE" -// "ALIGN_FRACTION_TRUE" -// "ALIGN_PERCENTILE_99" -// "ALIGN_PERCENTILE_95" -// "ALIGN_PERCENTILE_50" -// "ALIGN_PERCENTILE_05" -func (c *ProjectsTimeSeriesListCall) SecondaryAggregationPerSeriesAligner(secondaryAggregationPerSeriesAligner string) *ProjectsTimeSeriesListCall { - c.urlParams_.Set("secondaryAggregation.perSeriesAligner", secondaryAggregationPerSeriesAligner) - return c -} - -// View sets the optional parameter "view": Specifies which information -// is returned about the time series. -// -// Possible values: -// "FULL" -// "HEADERS" -func (c *ProjectsTimeSeriesListCall) View(view string) *ProjectsTimeSeriesListCall { - c.urlParams_.Set("view", view) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsTimeSeriesListCall) Fields(s ...googleapi.Field) *ProjectsTimeSeriesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsTimeSeriesListCall) IfNoneMatch(entityTag string) *ProjectsTimeSeriesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsTimeSeriesListCall) Context(ctx context.Context) *ProjectsTimeSeriesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ProjectsTimeSeriesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsTimeSeriesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/timeSeries") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "monitoring.projects.timeSeries.list" call. -// Exactly one of *ListTimeSeriesResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListTimeSeriesResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsTimeSeriesListCall) Do(opts ...googleapi.CallOption) (*ListTimeSeriesResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListTimeSeriesResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists time series that match a filter. This method does not require a Stackdriver account.", - // "flatPath": "v3/projects/{projectsId}/timeSeries", - // "httpMethod": "GET", - // "id": "monitoring.projects.timeSeries.list", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "aggregation.alignmentPeriod": { - // "description": "The alignment period for per-time series alignment. If present, alignmentPeriod must be at least 60 seconds. After per-time series alignment, each time series will contain data points only on the period boundaries. If perSeriesAligner is not specified or equals ALIGN_NONE, then this field is ignored. If perSeriesAligner is specified and does not equal ALIGN_NONE, then this field must be defined; otherwise an error is returned.", - // "format": "google-duration", - // "location": "query", - // "type": "string" - // }, - // "aggregation.crossSeriesReducer": { - // "description": "The approach to be used to combine time series. Not all reducer functions may be applied to all time series, depending on the metric type and the value type of the original time series. Reduction may change the metric type of value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. 
If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.", - // "enum": [ - // "REDUCE_NONE", - // "REDUCE_MEAN", - // "REDUCE_MIN", - // "REDUCE_MAX", - // "REDUCE_SUM", - // "REDUCE_STDDEV", - // "REDUCE_COUNT", - // "REDUCE_COUNT_TRUE", - // "REDUCE_FRACTION_TRUE", - // "REDUCE_PERCENTILE_99", - // "REDUCE_PERCENTILE_95", - // "REDUCE_PERCENTILE_50", - // "REDUCE_PERCENTILE_05" - // ], - // "location": "query", - // "type": "string" - // }, - // "aggregation.groupByFields": { - // "description": "The set of fields to preserve when crossSeriesReducer is specified. The groupByFields determine how the time series are partitioned into subsets prior to applying the aggregation function. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The crossSeriesReducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in groupByFields are aggregated away. If groupByFields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If crossSeriesReducer is not defined, this field is ignored.", - // "location": "query", - // "repeated": true, - // "type": "string" - // }, - // "aggregation.perSeriesAligner": { - // "description": "The approach to be used to align individual time series. Not all alignment functions may be applied to all time series, depending on the metric type and value type of the original time series. Alignment may change the metric type or the value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.", - // "enum": [ - // "ALIGN_NONE", - // "ALIGN_DELTA", - // "ALIGN_RATE", - // "ALIGN_INTERPOLATE", - // "ALIGN_NEXT_OLDER", - // "ALIGN_MIN", - // "ALIGN_MAX", - // "ALIGN_MEAN", - // "ALIGN_COUNT", - // "ALIGN_SUM", - // "ALIGN_STDDEV", - // "ALIGN_COUNT_TRUE", - // "ALIGN_FRACTION_TRUE", - // "ALIGN_PERCENTILE_99", - // "ALIGN_PERCENTILE_95", - // "ALIGN_PERCENTILE_50", - // "ALIGN_PERCENTILE_05" - // ], - // "location": "query", - // "type": "string" - // }, - // "filter": { - // "description": "A monitoring filter that specifies which time series should be returned. The filter must specify a single metric type, and can additionally specify metric labels and other information. For example:\nmetric.type = \"compute.googleapis.com/instance/cpu/usage_time\" AND\n metric.label.instance_name = \"my-instance-name\"\n", - // "location": "query", - // "type": "string" - // }, - // "interval.endTime": { - // "description": "Required. The end of the time interval.", - // "format": "google-datetime", - // "location": "query", - // "type": "string" - // }, - // "interval.startTime": { - // "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", - // "format": "google-datetime", - // "location": "query", - // "type": "string" - // }, - // "name": { - // "description": "The project on which to execute the request. 
The format is \"projects/{project_id_or_number}\".", - // "location": "path", - // "pattern": "^projects/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "orderBy": { - // "description": "Specifies the order in which the points of the time series should be returned. By default, results are not ordered. Currently, this field must be left blank.", - // "location": "query", - // "type": "string" - // }, - // "pageSize": { - // "description": "A positive number that is the maximum number of results to return. When view field sets to FULL, it limits the number of Points server will return; if view field is HEADERS, it limits the number of TimeSeries server will return.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - // "location": "query", - // "type": "string" - // }, - // "secondaryAggregation.alignmentPeriod": { - // "description": "The alignment period for per-time series alignment. If present, alignmentPeriod must be at least 60 seconds. After per-time series alignment, each time series will contain data points only on the period boundaries. If perSeriesAligner is not specified or equals ALIGN_NONE, then this field is ignored. If perSeriesAligner is specified and does not equal ALIGN_NONE, then this field must be defined; otherwise an error is returned.", - // "format": "google-duration", - // "location": "query", - // "type": "string" - // }, - // "secondaryAggregation.crossSeriesReducer": { - // "description": "The approach to be used to combine time series. Not all reducer functions may be applied to all time series, depending on the metric type and the value type of the original time series. Reduction may change the metric type of value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.", - // "enum": [ - // "REDUCE_NONE", - // "REDUCE_MEAN", - // "REDUCE_MIN", - // "REDUCE_MAX", - // "REDUCE_SUM", - // "REDUCE_STDDEV", - // "REDUCE_COUNT", - // "REDUCE_COUNT_TRUE", - // "REDUCE_FRACTION_TRUE", - // "REDUCE_PERCENTILE_99", - // "REDUCE_PERCENTILE_95", - // "REDUCE_PERCENTILE_50", - // "REDUCE_PERCENTILE_05" - // ], - // "location": "query", - // "type": "string" - // }, - // "secondaryAggregation.groupByFields": { - // "description": "The set of fields to preserve when crossSeriesReducer is specified. The groupByFields determine how the time series are partitioned into subsets prior to applying the aggregation function. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The crossSeriesReducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in groupByFields are aggregated away. If groupByFields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If crossSeriesReducer is not defined, this field is ignored.", - // "location": "query", - // "repeated": true, - // "type": "string" - // }, - // "secondaryAggregation.perSeriesAligner": { - // "description": "The approach to be used to align individual time series. Not all alignment functions may be applied to all time series, depending on the metric type and value type of the original time series. Alignment may change the metric type or the value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.", - // "enum": [ - // "ALIGN_NONE", - // "ALIGN_DELTA", - // "ALIGN_RATE", - // "ALIGN_INTERPOLATE", - // "ALIGN_NEXT_OLDER", - // "ALIGN_MIN", - // "ALIGN_MAX", - // "ALIGN_MEAN", - // "ALIGN_COUNT", - // "ALIGN_SUM", - // "ALIGN_STDDEV", - // "ALIGN_COUNT_TRUE", - // "ALIGN_FRACTION_TRUE", - // "ALIGN_PERCENTILE_99", - // "ALIGN_PERCENTILE_95", - // "ALIGN_PERCENTILE_50", - // "ALIGN_PERCENTILE_05" - // ], - // "location": "query", - // "type": "string" - // }, - // "view": { - // "description": "Specifies which information is returned about the time series.", - // "enum": [ - // "FULL", - // "HEADERS" - // ], - // "location": "query", - // "type": "string" - // } - // }, - // "path": "v3/{+name}/timeSeries", - // "response": { - // "$ref": "ListTimeSeriesResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *ProjectsTimeSeriesListCall) Pages(ctx context.Context, f func(*ListTimeSeriesResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} diff --git a/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json b/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json deleted file mode 100644 index 7a0ef731bb7..00000000000 --- a/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json +++ /dev/null @@ -1,1159 +0,0 @@ -{ - "kind": "discovery#restDescription", - "description": "Provides reliable, many-to-many, asynchronous messaging between applications.\n", - "servicePath": "", - "rootUrl": "https://pubsub.googleapis.com/", - "basePath": "", - "ownerDomain": "google.com", - "name": "pubsub", - "batchPath": "batch", - "revision": "20170329", - "documentationLink": "https://cloud.google.com/pubsub/docs", - "id": "pubsub:v1", - "title": "Google Cloud Pub/Sub API", - "ownerName": "Google", - "discoveryVersion": "v1", - "resources": { - "projects": { - "resources": { - "topics": { - "methods": { - "get": { - "description": "Gets the configuration of a topic.", - "response": { - "$ref": "Topic" - }, - "parameterOrder": [ - "topic" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "topic": { - "location": "path", - "description": "The name of the topic to get.\nFormat is `projects/{project}/topics/{topic}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/topics/[^/]+$" - } - }, - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", - "path": "v1/{+topic}", - "id": "pubsub.projects.topics.get" - }, - "publish": { - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:publish", - "path": "v1/{+topic}:publish", - "id": "pubsub.projects.topics.publish", - "description": "Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic\ndoes not exist. 
The message payload must not be empty; it must contain\n either a non-empty data field, or at least one attribute.", - "request": { - "$ref": "PublishRequest" - }, - "response": { - "$ref": "PublishResponse" - }, - "parameterOrder": [ - "topic" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "topic": { - "description": "The messages in the request will be published on this topic.\nFormat is `projects/{project}/topics/{topic}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/topics/[^/]+$", - "location": "path" - } - } - }, - "testIamPermissions": { - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "parameterOrder": [ - "resource" - ], - "httpMethod": "POST", - "parameters": { - "resource": { - "location": "path", - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/topics/[^/]+$" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:testIamPermissions", - "path": "v1/{+resource}:testIamPermissions", - "id": "pubsub.projects.topics.testIamPermissions", - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning." - }, - "delete": { - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", - "id": "pubsub.projects.topics.delete", - "path": "v1/{+topic}", - "description": "Deletes the topic with the given name. Returns `NOT_FOUND` if the topic\ndoes not exist. After a topic is deleted, a new topic may be created with\nthe same name; this is an entirely new topic with none of the old\nconfiguration or subscriptions. 
Existing subscriptions to this topic are\nnot deleted, but their `topic` field is set to `_deleted-topic_`.", - "httpMethod": "DELETE", - "parameterOrder": [ - "topic" - ], - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "topic": { - "location": "path", - "description": "Name of the topic to delete.\nFormat is `projects/{project}/topics/{topic}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/topics/[^/]+$" - } - } - }, - "list": { - "response": { - "$ref": "ListTopicsResponse" - }, - "parameterOrder": [ - "project" - ], - "httpMethod": "GET", - "parameters": { - "pageToken": { - "description": "The value returned by the last `ListTopicsResponse`; indicates that this is\na continuation of a prior `ListTopics` call, and that the system should\nreturn the next page of data.", - "type": "string", - "location": "query" - }, - "pageSize": { - "description": "Maximum number of topics to return.", - "format": "int32", - "type": "integer", - "location": "query" - }, - "project": { - "location": "path", - "description": "The name of the cloud project that topics belong to.\nFormat is `projects/{project}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "flatPath": "v1/projects/{projectsId}/topics", - "path": "v1/{+project}/topics", - "id": "pubsub.projects.topics.list", - "description": "Lists matching topics." - }, - "setIamPolicy": { - "response": { - "$ref": "Policy" - }, - "parameterOrder": [ - "resource" - ], - "httpMethod": "POST", - "parameters": { - "resource": { - "location": "path", - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/topics/[^/]+$" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:setIamPolicy", - "path": "v1/{+resource}:setIamPolicy", - "id": "pubsub.projects.topics.setIamPolicy", - "request": { - "$ref": "SetIamPolicyRequest" - }, - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy." - }, - "create": { - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", - "id": "pubsub.projects.topics.create", - "path": "v1/{+name}", - "request": { - "$ref": "Topic" - }, - "description": "Creates the given topic with the given name.", - "httpMethod": "PUT", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "Topic" - }, - "parameters": { - "name": { - "location": "path", - "description": "The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). 
It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/topics/[^/]+$" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "getIamPolicy": { - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:getIamPolicy", - "id": "pubsub.projects.topics.getIamPolicy", - "path": "v1/{+resource}:getIamPolicy", - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - "httpMethod": "GET", - "parameterOrder": [ - "resource" - ], - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/topics/[^/]+$", - "location": "path" - } - } - } - }, - "resources": { - "subscriptions": { - "methods": { - "list": { - "flatPath": "v1/projects/{projectsId}/topics/{topicsId}/subscriptions", - "id": "pubsub.projects.topics.subscriptions.list", - "path": "v1/{+topic}/subscriptions", - "description": "Lists the name of the subscriptions for this topic.", - "httpMethod": "GET", - "parameterOrder": [ - "topic" - ], - "response": { - "$ref": "ListTopicSubscriptionsResponse" - }, - "parameters": { - "pageSize": { - "location": "query", - "description": "Maximum number of subscription names to return.", - "format": "int32", - "type": "integer" - }, - "topic": { - "location": "path", - "description": "The name of the topic that subscriptions are attached to.\nFormat is `projects/{project}/topics/{topic}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/topics/[^/]+$" - }, - "pageToken": { - "location": "query", - "description": "The value returned by the last `ListTopicSubscriptionsResponse`; indicates\nthat this is a continuation of a prior `ListTopicSubscriptions` call, and\nthat the system should return the next page of data.", - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - } - } - } - } - }, - "subscriptions": { - "methods": { - "list": { - "httpMethod": "GET", - "response": { - "$ref": "ListSubscriptionsResponse" - }, - "parameterOrder": [ - "project" - ], - "parameters": { - "pageToken": { - "location": "query", - "description": "The value returned by the last `ListSubscriptionsResponse`; indicates that\nthis is a continuation of a prior `ListSubscriptions` call, and that the\nsystem should return the next page of data.", - "type": "string" - }, - "pageSize": { - "location": "query", - "description": "Maximum number of subscriptions to return.", - "format": "int32", - "type": "integer" - }, - "project": { - "description": "The name of the cloud project that subscriptions belong to.\nFormat is `projects/{project}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "flatPath": "v1/projects/{projectsId}/subscriptions", - "id": "pubsub.projects.subscriptions.list", - "path": "v1/{+project}/subscriptions", - 
"description": "Lists matching subscriptions." - }, - "setIamPolicy": { - "request": { - "$ref": "SetIamPolicyRequest" - }, - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - "response": { - "$ref": "Policy" - }, - "parameterOrder": [ - "resource" - ], - "httpMethod": "POST", - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:setIamPolicy", - "path": "v1/{+resource}:setIamPolicy", - "id": "pubsub.projects.subscriptions.setIamPolicy" - }, - "create": { - "response": { - "$ref": "Subscription" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "PUT", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "name": { - "location": "path", - "description": "The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/subscriptions/[^/]+$" - } - }, - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", - "path": "v1/{+name}", - "id": "pubsub.projects.subscriptions.create", - "description": "Creates a subscription to a given topic.\nIf the subscription already exists, returns `ALREADY_EXISTS`.\nIf the corresponding topic doesn't exist, returns `NOT_FOUND`.\n\nIf the name is not provided in the request, the server will assign a random\nname for this subscription on the same project as the topic, conforming\nto the\n[resource name format](https://cloud.google.com/pubsub/docs/overview#names).\nThe generated name is populated in the returned Subscription object.\nNote that for REST API requests, you must specify a name in the request.", - "request": { - "$ref": "Subscription" - } - }, - "acknowledge": { - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:acknowledge", - "path": "v1/{+subscription}:acknowledge", - "id": "pubsub.projects.subscriptions.acknowledge", - "description": "Acknowledges the messages associated with the `ack_ids` in the\n`AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages\nfrom the subscription.\n\nAcknowledging a message whose ack deadline has expired may succeed,\nbut such a message may be redelivered later. 
Acknowledging a message more\nthan once will not result in an error.", - "request": { - "$ref": "AcknowledgeRequest" - }, - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "subscription" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "subscription": { - "description": "The subscription whose message is being acknowledged.\nFormat is `projects/{project}/subscriptions/{sub}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path" - } - } - }, - "getIamPolicy": { - "httpMethod": "GET", - "parameterOrder": [ - "resource" - ], - "response": { - "$ref": "Policy" - }, - "parameters": { - "resource": { - "location": "path", - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/subscriptions/[^/]+$" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:getIamPolicy", - "id": "pubsub.projects.subscriptions.getIamPolicy", - "path": "v1/{+resource}:getIamPolicy", - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset." - }, - "modifyAckDeadline": { - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "subscription" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "subscription": { - "location": "path", - "description": "The name of the subscription.\nFormat is `projects/{project}/subscriptions/{sub}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/subscriptions/[^/]+$" - } - }, - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyAckDeadline", - "path": "v1/{+subscription}:modifyAckDeadline", - "id": "pubsub.projects.subscriptions.modifyAckDeadline", - "description": "Modifies the ack deadline for a specific message. This method is useful\nto indicate that more time is needed to process a message by the\nsubscriber, or to make the message available for redelivery if the\nprocessing was interrupted. 
Note that this does not modify the\nsubscription-level `ackDeadlineSeconds` used for subsequent messages.", - "request": { - "$ref": "ModifyAckDeadlineRequest" - } - }, - "get": { - "description": "Gets the configuration details of a subscription.", - "response": { - "$ref": "Subscription" - }, - "parameterOrder": [ - "subscription" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "subscription": { - "location": "path", - "description": "The name of the subscription to get.\nFormat is `projects/{project}/subscriptions/{sub}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/subscriptions/[^/]+$" - } - }, - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", - "path": "v1/{+subscription}", - "id": "pubsub.projects.subscriptions.get" - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "parameterOrder": [ - "resource" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "resource": { - "location": "path", - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/subscriptions/[^/]+$" - } - }, - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:testIamPermissions", - "path": "v1/{+resource}:testIamPermissions", - "id": "pubsub.projects.subscriptions.testIamPermissions" - }, - "modifyPushConfig": { - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "subscription" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "subscription": { - "location": "path", - "description": "The name of the subscription.\nFormat is `projects/{project}/subscriptions/{sub}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/subscriptions/[^/]+$" - } - }, - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyPushConfig", - "path": "v1/{+subscription}:modifyPushConfig", - "id": "pubsub.projects.subscriptions.modifyPushConfig", - "description": "Modifies the `PushConfig` for a specified subscription.\n\nThis may be used to change a push subscription to a pull one (signified by\nan empty `PushConfig`) or vice versa, or change the endpoint URL and other\nattributes of a push subscription. Messages will accumulate for delivery\ncontinuously through the call regardless of changes to the `PushConfig`.", - "request": { - "$ref": "ModifyPushConfigRequest" - } - }, - "pull": { - "request": { - "$ref": "PullRequest" - }, - "description": "Pulls messages from the server. Returns an empty list if there are no\nmessages available in the backlog. 
The server may return `UNAVAILABLE` if\nthere are too many concurrent pull requests pending for the given\nsubscription.", - "response": { - "$ref": "PullResponse" - }, - "parameterOrder": [ - "subscription" - ], - "httpMethod": "POST", - "parameters": { - "subscription": { - "description": "The subscription from which messages should be pulled.\nFormat is `projects/{project}/subscriptions/{sub}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:pull", - "path": "v1/{+subscription}:pull", - "id": "pubsub.projects.subscriptions.pull" - }, - "delete": { - "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", - "path": "v1/{+subscription}", - "id": "pubsub.projects.subscriptions.delete", - "description": "Deletes an existing subscription. All messages retained in the subscription\nare immediately dropped. Calls to `Pull` after deletion will return\n`NOT_FOUND`. After a subscription is deleted, a new one may be created with\nthe same name, but the new one has no association with the old\nsubscription or its topic unless the same topic is specified.", - "response": { - "$ref": "Empty" - }, - "parameterOrder": [ - "subscription" - ], - "httpMethod": "DELETE", - "parameters": { - "subscription": { - "description": "The subscription to delete.\nFormat is `projects/{project}/subscriptions/{sub}`.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - } - } - }, - "snapshots": { - "methods": { - "setIamPolicy": { - "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:setIamPolicy", - "path": "v1/{+resource}:setIamPolicy", - "id": "pubsub.projects.snapshots.setIamPolicy", - "request": { - "$ref": "SetIamPolicyRequest" - }, - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - "response": { - "$ref": "Policy" - }, - "parameterOrder": [ - "resource" - ], - "httpMethod": "POST", - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/snapshots/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ] - }, - "testIamPermissions": { - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. 
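A rough pull-then-acknowledge loop against the two methods defined above; the Projects.Subscriptions.Pull/Acknowledge call chain is assumed from the standard generated-client pattern, and the subscription name is supplied by the caller.

    package pubsubexample

    import (
        "log"

        pubsub "google.golang.org/api/pubsub/v1"
    )

    // pullAndAck drains up to ten messages and acks them. ReturnImmediately
    // avoids blocking when the backlog is empty, as described above.
    func pullAndAck(svc *pubsub.Service, subscription string) error {
        resp, err := svc.Projects.Subscriptions.Pull(subscription, &pubsub.PullRequest{
            MaxMessages:       10,
            ReturnImmediately: true,
        }).Do()
        if err != nil {
            return err
        }
        ackIDs := make([]string, 0, len(resp.ReceivedMessages))
        for _, m := range resp.ReceivedMessages {
            log.Printf("message %s: %q", m.Message.MessageId, m.Message.Data) // Data is base64-encoded
            ackIDs = append(ackIDs, m.AckId)
        }
        if len(ackIDs) == 0 {
            return nil
        }
        _, err = svc.Projects.Subscriptions.Acknowledge(subscription, &pubsub.AcknowledgeRequest{
            AckIds: ackIDs,
        }).Do()
        return err
    }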
This operation\nmay \"fail open\" without warning.", - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "parameterOrder": [ - "resource" - ], - "httpMethod": "POST", - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/snapshots/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:testIamPermissions", - "path": "v1/{+resource}:testIamPermissions", - "id": "pubsub.projects.snapshots.testIamPermissions" - }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - "response": { - "$ref": "Policy" - }, - "parameterOrder": [ - "resource" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/pubsub" - ], - "parameters": { - "resource": { - "location": "path", - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - "required": true, - "type": "string", - "pattern": "^projects/[^/]+/snapshots/[^/]+$" - } - }, - "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:getIamPolicy", - "path": "v1/{+resource}:getIamPolicy", - "id": "pubsub.projects.snapshots.getIamPolicy" - } - } - } - } - } - }, - "parameters": { - "pp": { - "location": "query", - "description": "Pretty-print response.", - "type": "boolean", - "default": "true" - }, - "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" - }, - "oauth_token": { - "location": "query", - "description": "OAuth 2.0 token for the current user.", - "type": "string" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string", - "location": "query" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "type": "boolean", - "default": "true", - "location": "query" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" - }, - "callback": { - "location": "query", - "description": "JSONP", - "type": "string" - }, - "$.xgafv": { - "description": "V1 error format.", - "type": "string", - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query", - "enum": [ - "1", - "2" - ] - }, - "alt": { - "type": "string", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query", - "description": "Data format for response.", - "default": "json", - "enum": [ - "json", - "media", - "proto" - ] - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "access_token": { - "description": "OAuth access token.", - "type": "string", - "location": "query" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - } - }, - "schemas": { - "Policy": { - "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", - "type": "object", - "properties": { - "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", - "format": "byte", - "type": "string" - }, - "version": { - "description": "Version of the `Policy`. The default version is 0.", - "format": "int32", - "type": "integer" - }, - "bindings": { - "description": "Associates a list of `members` to a `role`.\nMultiple `bindings` must not be specified for the same `role`.\n`bindings` with no members will result in an error.", - "type": "array", - "items": { - "$ref": "Binding" - } - } - }, - "id": "Policy" - }, - "Topic": { - "description": "A topic resource.", - "type": "object", - "properties": { - "name": { - "description": "The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`.", - "type": "string" - } - }, - "id": "Topic" - }, - "ModifyAckDeadlineRequest": { - "description": "Request for the ModifyAckDeadline method.", - "type": "object", - "properties": { - "ackDeadlineSeconds": { - "description": "The new ack deadline with respect to the time this request was sent to\nthe Pub/Sub system. For example, if the value is 10, the new\nack deadline will expire 10 seconds after the `ModifyAckDeadline` call\nwas made. 
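The etag-based read-modify-write cycle described in the Policy schema above could look roughly like this; the GetIamPolicy/SetIamPolicy call chain is assumed from the generated pattern, and the role and member values mirror the example in the schema.

    package pubsubexample

    import (
        pubsub "google.golang.org/api/pubsub/v1"
    )

    // grantViewer adds a viewer binding to a topic, carrying the etag returned
    // by getIamPolicy back into setIamPolicy so concurrent updates are detected.
    func grantViewer(svc *pubsub.Service, topic, member string) error {
        policy, err := svc.Projects.Topics.GetIamPolicy(topic).Do()
        if err != nil {
            return err
        }
        policy.Bindings = append(policy.Bindings, &pubsub.Binding{
            Role:    "roles/viewer",
            Members: []string{member}, // e.g. "user:sean@example.com" as in the Policy example
        })
        _, err = svc.Projects.Topics.SetIamPolicy(topic, &pubsub.SetIamPolicyRequest{
            Policy: policy, // policy.Etag is carried over from the get
        }).Do()
        return err
    }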
Specifying zero may immediately make the message available for\nanother pull request.\nThe minimum deadline you can specify is 0 seconds.\nThe maximum deadline you can specify is 600 seconds (10 minutes).", - "format": "int32", - "type": "integer" - }, - "ackIds": { - "description": "List of acknowledgment IDs.", - "type": "array", - "items": { - "type": "string" - } - } - }, - "id": "ModifyAckDeadlineRequest" - }, - "SetIamPolicyRequest": { - "description": "Request message for `SetIamPolicy` method.", - "type": "object", - "properties": { - "policy": { - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them.", - "$ref": "Policy" - } - }, - "id": "SetIamPolicyRequest" - }, - "ModifyPushConfigRequest": { - "description": "Request for the ModifyPushConfig method.", - "type": "object", - "properties": { - "pushConfig": { - "$ref": "PushConfig", - "description": "The push configuration for future deliveries.\n\nAn empty `pushConfig` indicates that the Pub/Sub system should\nstop pushing messages from the given subscription and allow\nmessages to be pulled and acknowledged - effectively pausing\nthe subscription if `Pull` is not called." - } - }, - "id": "ModifyPushConfigRequest" - }, - "PubsubMessage": { - "description": "A message data and its attributes. The message payload must not be empty;\nit must contain either a non-empty data field, or at least one attribute.", - "type": "object", - "properties": { - "data": { - "description": "The message payload.", - "format": "byte", - "type": "string" - }, - "attributes": { - "description": "Optional attributes for this message.", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "messageId": { - "description": "ID of this message, assigned by the server when the message is published.\nGuaranteed to be unique within the topic. This value may be read by a\nsubscriber that receives a `PubsubMessage` via a `Pull` call or a push\ndelivery. It must not be populated by the publisher in a `Publish` call.", - "type": "string" - }, - "publishTime": { - "description": "The time at which the message was published, populated by the server when\nit receives the `Publish` call. It must not be populated by the\npublisher in a `Publish` call.", - "format": "google-datetime", - "type": "string" - } - }, - "id": "PubsubMessage" - }, - "Binding": { - "description": "Associates `members` with a `role`.", - "type": "object", - "properties": { - "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. 
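A small sketch of the ModifyAckDeadlineRequest just defined: a positive deadline buys more processing time, while zero makes the message immediately eligible for redelivery (call chain assumed from the generated pattern).

    package pubsubexample

    import (
        pubsub "google.golang.org/api/pubsub/v1"
    )

    // extendDeadline asks for 60 more seconds on the given ack IDs; passing 0
    // instead would effectively "nack" them for prompt redelivery.
    func extendDeadline(svc *pubsub.Service, subscription string, ackIDs []string) error {
        _, err := svc.Projects.Subscriptions.ModifyAckDeadline(subscription, &pubsub.ModifyAckDeadlineRequest{
            AckIds:             ackIDs,
            AckDeadlineSeconds: 60, // 0-600 seconds per the schema above
        }).Do()
        return err
    }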
For example, `google.com` or `example.com`.\n\n", - "type": "array", - "items": { - "type": "string" - } - }, - "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", - "type": "string" - } - }, - "id": "Binding" - }, - "AcknowledgeRequest": { - "description": "Request for the Acknowledge method.", - "type": "object", - "properties": { - "ackIds": { - "description": "The acknowledgment ID for the messages being acknowledged that was returned\nby the Pub/Sub system in the `Pull` response. Must not be empty.", - "type": "array", - "items": { - "type": "string" - } - } - }, - "id": "AcknowledgeRequest" - }, - "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", - "type": "object", - "properties": {}, - "id": "Empty" - }, - "ListTopicsResponse": { - "description": "Response for the `ListTopics` method.", - "type": "object", - "properties": { - "topics": { - "description": "The resulting topics.", - "type": "array", - "items": { - "$ref": "Topic" - } - }, - "nextPageToken": { - "description": "If not empty, indicates that there may be more topics that match the\nrequest; this value should be passed in a new `ListTopicsRequest`.", - "type": "string" - } - }, - "id": "ListTopicsResponse" - }, - "ListTopicSubscriptionsResponse": { - "description": "Response for the `ListTopicSubscriptions` method.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "If not empty, indicates that there may be more subscriptions that match\nthe request; this value should be passed in a new\n`ListTopicSubscriptionsRequest` to get more subscriptions.", - "type": "string" - }, - "subscriptions": { - "description": "The names of the subscriptions that match the request.", - "type": "array", - "items": { - "type": "string" - } - } - }, - "id": "ListTopicSubscriptionsResponse" - }, - "PullResponse": { - "description": "Response for the `Pull` method.", - "type": "object", - "properties": { - "receivedMessages": { - "description": "Received Pub/Sub messages. The Pub/Sub system will return zero messages if\nthere are no more available in the backlog. The Pub/Sub system may return\nfewer than the `maxMessages` requested even if there are more messages\navailable in the backlog.", - "type": "array", - "items": { - "$ref": "ReceivedMessage" - } - } - }, - "id": "PullResponse" - }, - "ReceivedMessage": { - "description": "A message and its corresponding acknowledgment ID.", - "type": "object", - "properties": { - "ackId": { - "description": "This ID can be used to acknowledge the received message.", - "type": "string" - }, - "message": { - "$ref": "PubsubMessage", - "description": "The message." 
- } - }, - "id": "ReceivedMessage" - }, - "PushConfig": { - "description": "Configuration for a push delivery endpoint.", - "type": "object", - "properties": { - "pushEndpoint": { - "description": "A URL locating the endpoint to which messages should be pushed.\nFor example, a Webhook endpoint might use \"https://example.com/push\".", - "type": "string" - }, - "attributes": { - "additionalProperties": { - "type": "string" - }, - "description": "Endpoint configuration attributes.\n\nEvery endpoint has a set of API supported attributes that can be used to\ncontrol different aspects of the message delivery.\n\nThe currently supported attribute is `x-goog-version`, which you can\nuse to change the format of the pushed message. This attribute\nindicates the version of the data expected by the endpoint. This\ncontrols the shape of the pushed message (i.e., its fields and metadata).\nThe endpoint version is based on the version of the Pub/Sub API.\n\nIf not present during the `CreateSubscription` call, it will default to\nthe version of the API used to make such call. If not present during a\n`ModifyPushConfig` call, its value will not be changed. `GetSubscription`\ncalls will always return a valid version, even if the subscription was\ncreated without this attribute.\n\nThe possible values for this attribute are:\n\n* `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API.\n* `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API.", - "type": "object" - } - }, - "id": "PushConfig" - }, - "TestIamPermissionsResponse": { - "description": "Response message for `TestIamPermissions` method.", - "type": "object", - "properties": { - "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", - "type": "array", - "items": { - "type": "string" - } - } - }, - "id": "TestIamPermissionsResponse" - }, - "PullRequest": { - "description": "Request for the `Pull` method.", - "type": "object", - "properties": { - "returnImmediately": { - "description": "If this field set to true, the system will respond immediately even if\nit there are no messages available to return in the `Pull` response.\nOtherwise, the system may wait (for a bounded amount of time) until at\nleast one message is available, rather than returning no messages. The\nclient may cancel the request if it does not wish to wait any longer for\nthe response.", - "type": "boolean" - }, - "maxMessages": { - "description": "The maximum number of messages returned for this request. 
The Pub/Sub\nsystem may return fewer than the number specified.", - "format": "int32", - "type": "integer" - } - }, - "id": "PullRequest" - }, - "ListSubscriptionsResponse": { - "description": "Response for the `ListSubscriptions` method.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "If not empty, indicates that there may be more subscriptions that match\nthe request; this value should be passed in a new\n`ListSubscriptionsRequest` to get more subscriptions.", - "type": "string" - }, - "subscriptions": { - "description": "The subscriptions that match the request.", - "type": "array", - "items": { - "$ref": "Subscription" - } - } - }, - "id": "ListSubscriptionsResponse" - }, - "PublishRequest": { - "description": "Request for the Publish method.", - "type": "object", - "properties": { - "messages": { - "description": "The messages to publish.", - "type": "array", - "items": { - "$ref": "PubsubMessage" - } - } - }, - "id": "PublishRequest" - }, - "PublishResponse": { - "description": "Response for the `Publish` method.", - "type": "object", - "properties": { - "messageIds": { - "description": "The server-assigned ID of each published message, in the same order as\nthe messages in the request. IDs are guaranteed to be unique within\nthe topic.", - "type": "array", - "items": { - "type": "string" - } - } - }, - "id": "PublishResponse" - }, - "Subscription": { - "description": "A subscription resource.", - "type": "object", - "properties": { - "ackDeadlineSeconds": { - "description": "This value is the maximum time after a subscriber receives a message\nbefore the subscriber should acknowledge the message. After message\ndelivery but before the ack deadline expires and before the message is\nacknowledged, it is an outstanding message and will not be delivered\nagain during that time (on a best-effort basis).\n\nFor pull subscriptions, this value is used as the initial value for the ack\ndeadline. To override this value for a given message, call\n`ModifyAckDeadline` with the corresponding `ack_id` if using\npull.\nThe minimum custom deadline you can specify is 10 seconds.\nThe maximum custom deadline you can specify is 600 seconds (10 minutes).\nIf this parameter is 0, a default value of 10 seconds is used.\n\nFor push delivery, this value is also used to set the request timeout for\nthe call to the push endpoint.\n\nIf the subscriber never acknowledges the message, the Pub/Sub\nsystem will eventually redeliver the message.", - "format": "int32", - "type": "integer" - }, - "name": { - "description": "The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", - "type": "string" - }, - "topic": { - "description": "The name of the topic from which this subscription is receiving messages.\nFormat is `projects/{project}/topics/{topic}`.\nThe value of this field will be `_deleted-topic_` if the topic has been\ndeleted.", - "type": "string" - }, - "pushConfig": { - "$ref": "PushConfig", - "description": "If push delivery is used with this subscription, this field is\nused to configure it. An empty `pushConfig` signifies that the subscriber\nwill pull and ack messages using API methods." 
- } - }, - "id": "Subscription" - }, - "TestIamPermissionsRequest": { - "description": "Request message for `TestIamPermissions` method.", - "type": "object", - "properties": { - "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", - "type": "array", - "items": { - "type": "string" - } - } - }, - "id": "TestIamPermissionsRequest" - } - }, - "protocol": "rest", - "icons": { - "x32": "http://www.google.com/images/icons/product/search-32.gif", - "x16": "http://www.google.com/images/icons/product/search-16.gif" - }, - "version": "v1", - "baseUrl": "https://pubsub.googleapis.com/", - "canonicalName": "Pubsub", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/pubsub": { - "description": "View and manage Pub/Sub topics and subscriptions" - } - } - } - } -} diff --git a/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go b/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go deleted file mode 100644 index f14f12ad9ae..00000000000 --- a/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go +++ /dev/null @@ -1,4458 +0,0 @@ -// Package pubsub provides access to the Google Cloud Pub/Sub API. -// -// See https://cloud.google.com/pubsub/docs -// -// Usage example: -// -// import "google.golang.org/api/pubsub/v1" -// ... -// pubsubService, err := pubsub.New(oauthHttpClient) -package pubsub // import "google.golang.org/api/pubsub/v1" - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - context "golang.org/x/net/context" - ctxhttp "golang.org/x/net/context/ctxhttp" - gensupport "google.golang.org/api/gensupport" - googleapi "google.golang.org/api/googleapi" - "io" - "net/http" - "net/url" - "strconv" - "strings" -) - -// Always reference these packages, just in case the auto-generated code -// below doesn't. -var _ = bytes.NewBuffer -var _ = strconv.Itoa -var _ = fmt.Sprintf -var _ = json.NewDecoder -var _ = io.Copy -var _ = url.Parse -var _ = gensupport.MarshalJSON -var _ = googleapi.Version -var _ = errors.New -var _ = strings.Replace -var _ = context.Canceled -var _ = ctxhttp.Do - -const apiId = "pubsub:v1" -const apiName = "pubsub" -const apiVersion = "v1" -const basePath = "https://pubsub.googleapis.com/" - -// OAuth2 scopes used by this API. 
-const ( - // View and manage your data across Google Cloud Platform services - CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" - - // View and manage Pub/Sub topics and subscriptions - PubsubScope = "https://www.googleapis.com/auth/pubsub" -) - -func New(client *http.Client) (*Service, error) { - if client == nil { - return nil, errors.New("client is nil") - } - s := &Service{client: client, BasePath: basePath} - s.Projects = NewProjectsService(s) - return s, nil -} - -type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment - - Projects *ProjectsService -} - -func (s *Service) userAgent() string { - if s.UserAgent == "" { - return googleapi.UserAgent - } - return googleapi.UserAgent + " " + s.UserAgent -} - -func NewProjectsService(s *Service) *ProjectsService { - rs := &ProjectsService{s: s} - rs.Snapshots = NewProjectsSnapshotsService(s) - rs.Subscriptions = NewProjectsSubscriptionsService(s) - rs.Topics = NewProjectsTopicsService(s) - return rs -} - -type ProjectsService struct { - s *Service - - Snapshots *ProjectsSnapshotsService - - Subscriptions *ProjectsSubscriptionsService - - Topics *ProjectsTopicsService -} - -func NewProjectsSnapshotsService(s *Service) *ProjectsSnapshotsService { - rs := &ProjectsSnapshotsService{s: s} - return rs -} - -type ProjectsSnapshotsService struct { - s *Service -} - -func NewProjectsSubscriptionsService(s *Service) *ProjectsSubscriptionsService { - rs := &ProjectsSubscriptionsService{s: s} - return rs -} - -type ProjectsSubscriptionsService struct { - s *Service -} - -func NewProjectsTopicsService(s *Service) *ProjectsTopicsService { - rs := &ProjectsTopicsService{s: s} - rs.Subscriptions = NewProjectsTopicsSubscriptionsService(s) - return rs -} - -type ProjectsTopicsService struct { - s *Service - - Subscriptions *ProjectsTopicsSubscriptionsService -} - -func NewProjectsTopicsSubscriptionsService(s *Service) *ProjectsTopicsSubscriptionsService { - rs := &ProjectsTopicsSubscriptionsService{s: s} - return rs -} - -type ProjectsTopicsSubscriptionsService struct { - s *Service -} - -// AcknowledgeRequest: Request for the Acknowledge method. -type AcknowledgeRequest struct { - // AckIds: The acknowledgment ID for the messages being acknowledged - // that was returned - // by the Pub/Sub system in the `Pull` response. Must not be empty. - AckIds []string `json:"ackIds,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AckIds") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AckIds") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
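Given the New constructor and scope constants above, callers still need an OAuth2-authorized *http.Client; one common way, assumed here rather than prescribed by this package, is Application Default Credentials via golang.org/x/oauth2/google:

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/net/context"
        "golang.org/x/oauth2/google"
        pubsub "google.golang.org/api/pubsub/v1"
    )

    func main() {
        ctx := context.Background()
        // DefaultClient picks up Application Default Credentials and scopes the
        // token to the Pub/Sub API; CloudPlatformScope would also work here.
        client, err := google.DefaultClient(ctx, pubsub.PubsubScope)
        if err != nil {
            log.Fatal(err)
        }
        svc, err := pubsub.New(client)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("base URL:", svc.BasePath)
    }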
- NullFields []string `json:"-"` -} - -func (s *AcknowledgeRequest) MarshalJSON() ([]byte, error) { - type noMethod AcknowledgeRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Binding: Associates `members` with a `role`. -type Binding struct { - // Members: Specifies the identities requesting access for a Cloud - // Platform resource. - // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents - // anyone - // who is authenticated with a Google account or a service - // account. - // - // * `user:{emailid}`: An email address that represents a specific - // Google - // account. For example, `alice@gmail.com` or `joe@example.com`. - // - // - // * `serviceAccount:{emailid}`: An email address that represents a - // service - // account. For example, - // `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google - // group. - // For example, `admins@example.com`. - // - // * `domain:{domain}`: A Google Apps domain name that represents all - // the - // users of that domain. For example, `google.com` or - // `example.com`. - // - // - Members []string `json:"members,omitempty"` - - // Role: Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or - // `roles/owner`. - // Required - Role string `json:"role,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Members") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Members") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Binding) MarshalJSON() ([]byte, error) { - type noMethod Binding - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Empty: A generic empty message that you can re-use to avoid defining -// duplicated -// empty messages in your APIs. A typical example is to use it as the -// request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. -type Empty struct { - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` -} - -// ListSubscriptionsResponse: Response for the `ListSubscriptions` -// method. -type ListSubscriptionsResponse struct { - // NextPageToken: If not empty, indicates that there may be more - // subscriptions that match - // the request; this value should be passed in a - // new - // `ListSubscriptionsRequest` to get more subscriptions. 
- NextPageToken string `json:"nextPageToken,omitempty"` - - // Subscriptions: The subscriptions that match the request. - Subscriptions []*Subscription `json:"subscriptions,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "NextPageToken") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "NextPageToken") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListSubscriptionsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListSubscriptionsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListTopicSubscriptionsResponse: Response for the -// `ListTopicSubscriptions` method. -type ListTopicSubscriptionsResponse struct { - // NextPageToken: If not empty, indicates that there may be more - // subscriptions that match - // the request; this value should be passed in a - // new - // `ListTopicSubscriptionsRequest` to get more subscriptions. - NextPageToken string `json:"nextPageToken,omitempty"` - - // Subscriptions: The names of the subscriptions that match the request. - Subscriptions []string `json:"subscriptions,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "NextPageToken") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "NextPageToken") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListTopicSubscriptionsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListTopicSubscriptionsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListTopicsResponse: Response for the `ListTopics` method. -type ListTopicsResponse struct { - // NextPageToken: If not empty, indicates that there may be more topics - // that match the - // request; this value should be passed in a new `ListTopicsRequest`. 
- NextPageToken string `json:"nextPageToken,omitempty"` - - // Topics: The resulting topics. - Topics []*Topic `json:"topics,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "NextPageToken") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "NextPageToken") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListTopicsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListTopicsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ModifyAckDeadlineRequest: Request for the ModifyAckDeadline method. -type ModifyAckDeadlineRequest struct { - // AckDeadlineSeconds: The new ack deadline with respect to the time - // this request was sent to - // the Pub/Sub system. For example, if the value is 10, the new - // ack deadline will expire 10 seconds after the `ModifyAckDeadline` - // call - // was made. Specifying zero may immediately make the message available - // for - // another pull request. - // The minimum deadline you can specify is 0 seconds. - // The maximum deadline you can specify is 600 seconds (10 minutes). - AckDeadlineSeconds int64 `json:"ackDeadlineSeconds,omitempty"` - - // AckIds: List of acknowledgment IDs. - AckIds []string `json:"ackIds,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AckDeadlineSeconds") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AckDeadlineSeconds") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ModifyAckDeadlineRequest) MarshalJSON() ([]byte, error) { - type noMethod ModifyAckDeadlineRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ModifyPushConfigRequest: Request for the ModifyPushConfig method. -type ModifyPushConfigRequest struct { - // PushConfig: The push configuration for future deliveries. 
- // - // An empty `pushConfig` indicates that the Pub/Sub system should - // stop pushing messages from the given subscription and allow - // messages to be pulled and acknowledged - effectively pausing - // the subscription if `Pull` is not called. - PushConfig *PushConfig `json:"pushConfig,omitempty"` - - // ForceSendFields is a list of field names (e.g. "PushConfig") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "PushConfig") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ModifyPushConfigRequest) MarshalJSON() ([]byte, error) { - type noMethod ModifyPushConfigRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Policy: Defines an Identity and Access Management (IAM) policy. It is -// used to -// specify access control policies for Cloud Platform resources. -// -// -// A `Policy` consists of a list of `bindings`. A `Binding` binds a list -// of -// `members` to a `role`, where the members can be user accounts, Google -// groups, -// Google domains, and service accounts. A `role` is a named list of -// permissions -// defined by IAM. -// -// **Example** -// -// { -// "bindings": [ -// { -// "role": "roles/owner", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// -// "serviceAccount:my-other-app@appspot.gserviceaccount.com", -// ] -// }, -// { -// "role": "roles/viewer", -// "members": ["user:sean@example.com"] -// } -// ] -// } -// -// For a description of IAM and its features, see the -// [IAM developer's guide](https://cloud.google.com/iam). -type Policy struct { - // Bindings: Associates a list of `members` to a `role`. - // Multiple `bindings` must not be specified for the same - // `role`. - // `bindings` with no members will result in an error. - Bindings []*Binding `json:"bindings,omitempty"` - - // Etag: `etag` is used for optimistic concurrency control as a way to - // help - // prevent simultaneous updates of a policy from overwriting each - // other. - // It is strongly suggested that systems make use of the `etag` in - // the - // read-modify-write cycle to perform policy updates in order to avoid - // race - // conditions: An `etag` is returned in the response to `getIamPolicy`, - // and - // systems are expected to put that etag in the request to - // `setIamPolicy` to - // ensure that their change will be applied to the same version of the - // policy. - // - // If no `etag` is provided in the call to `setIamPolicy`, then the - // existing - // policy is overwritten blindly. - Etag string `json:"etag,omitempty"` - - // Version: Version of the `Policy`. The default version is 0. - Version int64 `json:"version,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. 
- googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Bindings") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Bindings") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Policy) MarshalJSON() ([]byte, error) { - type noMethod Policy - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// PublishRequest: Request for the Publish method. -type PublishRequest struct { - // Messages: The messages to publish. - Messages []*PubsubMessage `json:"messages,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Messages") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Messages") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *PublishRequest) MarshalJSON() ([]byte, error) { - type noMethod PublishRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// PublishResponse: Response for the `Publish` method. -type PublishResponse struct { - // MessageIds: The server-assigned ID of each published message, in the - // same order as - // the messages in the request. IDs are guaranteed to be unique - // within - // the topic. - MessageIds []string `json:"messageIds,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "MessageIds") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "MessageIds") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *PublishResponse) MarshalJSON() ([]byte, error) { - type noMethod PublishResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// PubsubMessage: A message data and its attributes. The message payload -// must not be empty; -// it must contain either a non-empty data field, or at least one -// attribute. -type PubsubMessage struct { - // Attributes: Optional attributes for this message. - Attributes map[string]string `json:"attributes,omitempty"` - - // Data: The message payload. - Data string `json:"data,omitempty"` - - // MessageId: ID of this message, assigned by the server when the - // message is published. - // Guaranteed to be unique within the topic. This value may be read by - // a - // subscriber that receives a `PubsubMessage` via a `Pull` call or a - // push - // delivery. It must not be populated by the publisher in a `Publish` - // call. - MessageId string `json:"messageId,omitempty"` - - // PublishTime: The time at which the message was published, populated - // by the server when - // it receives the `Publish` call. It must not be populated by - // the - // publisher in a `Publish` call. - PublishTime string `json:"publishTime,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Attributes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Attributes") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *PubsubMessage) MarshalJSON() ([]byte, error) { - type noMethod PubsubMessage - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// PullRequest: Request for the `Pull` method. -type PullRequest struct { - // MaxMessages: The maximum number of messages returned for this - // request. The Pub/Sub - // system may return fewer than the number specified. - MaxMessages int64 `json:"maxMessages,omitempty"` - - // ReturnImmediately: If this field set to true, the system will respond - // immediately even if - // it there are no messages available to return in the `Pull` - // response. - // Otherwise, the system may wait (for a bounded amount of time) until - // at - // least one message is available, rather than returning no messages. - // The - // client may cancel the request if it does not wish to wait any longer - // for - // the response. - ReturnImmediately bool `json:"returnImmediately,omitempty"` - - // ForceSendFields is a list of field names (e.g. "MaxMessages") to - // unconditionally include in API requests. 
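Because PubsubMessage.Data is a base64-encoded byte field, a publisher encodes the payload before sending; a rough sketch against the generated surface (Projects.Topics.Publish chain assumed, topic supplied by the caller):

    package pubsubexample

    import (
        "encoding/base64"

        pubsub "google.golang.org/api/pubsub/v1"
    )

    // publishOne sends a single message and returns the server-assigned ID.
    func publishOne(svc *pubsub.Service, topic string, payload []byte) (string, error) {
        req := &pubsub.PublishRequest{
            Messages: []*pubsub.PubsubMessage{{
                Data:       base64.StdEncoding.EncodeToString(payload), // non-empty data or at least one attribute is required
                Attributes: map[string]string{"origin": "example"},
            }},
        }
        resp, err := svc.Projects.Topics.Publish(topic, req).Do()
        if err != nil {
            return "", err
        }
        return resp.MessageIds[0], nil
    }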
By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "MaxMessages") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *PullRequest) MarshalJSON() ([]byte, error) { - type noMethod PullRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// PullResponse: Response for the `Pull` method. -type PullResponse struct { - // ReceivedMessages: Received Pub/Sub messages. The Pub/Sub system will - // return zero messages if - // there are no more available in the backlog. The Pub/Sub system may - // return - // fewer than the `maxMessages` requested even if there are more - // messages - // available in the backlog. - ReceivedMessages []*ReceivedMessage `json:"receivedMessages,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "ReceivedMessages") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ReceivedMessages") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *PullResponse) MarshalJSON() ([]byte, error) { - type noMethod PullResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// PushConfig: Configuration for a push delivery endpoint. -type PushConfig struct { - // Attributes: Endpoint configuration attributes. - // - // Every endpoint has a set of API supported attributes that can be used - // to - // control different aspects of the message delivery. - // - // The currently supported attribute is `x-goog-version`, which you - // can - // use to change the format of the pushed message. This - // attribute - // indicates the version of the data expected by the endpoint. - // This - // controls the shape of the pushed message (i.e., its fields and - // metadata). - // The endpoint version is based on the version of the Pub/Sub API. - // - // If not present during the `CreateSubscription` call, it will default - // to - // the version of the API used to make such call. 
If not present during - // a - // `ModifyPushConfig` call, its value will not be changed. - // `GetSubscription` - // calls will always return a valid version, even if the subscription - // was - // created without this attribute. - // - // The possible values for this attribute are: - // - // * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub - // API. - // * `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub - // API. - Attributes map[string]string `json:"attributes,omitempty"` - - // PushEndpoint: A URL locating the endpoint to which messages should be - // pushed. - // For example, a Webhook endpoint might use "https://example.com/push". - PushEndpoint string `json:"pushEndpoint,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Attributes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Attributes") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *PushConfig) MarshalJSON() ([]byte, error) { - type noMethod PushConfig - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ReceivedMessage: A message and its corresponding acknowledgment ID. -type ReceivedMessage struct { - // AckId: This ID can be used to acknowledge the received message. - AckId string `json:"ackId,omitempty"` - - // Message: The message. - Message *PubsubMessage `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AckId") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AckId") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ReceivedMessage) MarshalJSON() ([]byte, error) { - type noMethod ReceivedMessage - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SetIamPolicyRequest: Request message for `SetIamPolicy` method. -type SetIamPolicyRequest struct { - // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. The size of - // the policy is limited to a few 10s of KB. 
An empty policy is a - // valid policy but certain Cloud Platform services (such as - // Projects) - // might reject them. - Policy *Policy `json:"policy,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Policy") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Policy") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { - type noMethod SetIamPolicyRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Subscription: A subscription resource. -type Subscription struct { - // AckDeadlineSeconds: This value is the maximum time after a subscriber - // receives a message - // before the subscriber should acknowledge the message. After - // message - // delivery but before the ack deadline expires and before the message - // is - // acknowledged, it is an outstanding message and will not be - // delivered - // again during that time (on a best-effort basis). - // - // For pull subscriptions, this value is used as the initial value for - // the ack - // deadline. To override this value for a given message, - // call - // `ModifyAckDeadline` with the corresponding `ack_id` if - // using - // pull. - // The minimum custom deadline you can specify is 10 seconds. - // The maximum custom deadline you can specify is 600 seconds (10 - // minutes). - // If this parameter is 0, a default value of 10 seconds is used. - // - // For push delivery, this value is also used to set the request timeout - // for - // the call to the push endpoint. - // - // If the subscriber never acknowledges the message, the Pub/Sub - // system will eventually redeliver the message. - AckDeadlineSeconds int64 `json:"ackDeadlineSeconds,omitempty"` - - // Name: The name of the subscription. It must have the - // format - // "projects/{project}/subscriptions/{subscription}". `{subscription}` - // must - // start with a letter, and contain only letters (`[A-Za-z]`), - // numbers - // (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes - // (`~`), - // plus (`+`) or percent signs (`%`). It must be between 3 and 255 - // characters - // in length, and it must not start with "goog". - Name string `json:"name,omitempty"` - - // PushConfig: If push delivery is used with this subscription, this - // field is - // used to configure it. An empty `pushConfig` signifies that the - // subscriber - // will pull and ack messages using API methods. - PushConfig *PushConfig `json:"pushConfig,omitempty"` - - // Topic: The name of the topic from which this subscription is - // receiving messages. - // Format is `projects/{project}/topics/{topic}`. - // The value of this field will be `_deleted-topic_` if the topic has - // been - // deleted. 
- Topic string `json:"topic,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "AckDeadlineSeconds") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AckDeadlineSeconds") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Subscription) MarshalJSON() ([]byte, error) { - type noMethod Subscription - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TestIamPermissionsRequest: Request message for `TestIamPermissions` -// method. -type TestIamPermissionsRequest struct { - // Permissions: The set of permissions to check for the `resource`. - // Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. For - // more - // information see - // [IAM - // Overview](https://cloud.google.com/iam/docs/overview#permissions). - Permissions []string `json:"permissions,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Permissions") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Permissions") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { - type noMethod TestIamPermissionsRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TestIamPermissionsResponse: Response message for `TestIamPermissions` -// method. -type TestIamPermissionsResponse struct { - // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is - // allowed. - Permissions []string `json:"permissions,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Permissions") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Permissions") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { - type noMethod TestIamPermissionsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Topic: A topic resource. -type Topic struct { - // Name: The name of the topic. It must have the - // format - // "projects/{project}/topics/{topic}". `{topic}` must start with a - // letter, - // and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes - // (`-`), - // underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or - // percent - // signs (`%`). It must be between 3 and 255 characters in length, and - // it - // must not start with "goog". - Name string `json:"name,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Name") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Name") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Topic) MarshalJSON() ([]byte, error) { - type noMethod Topic - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// method id "pubsub.projects.snapshots.getIamPolicy": - -type ProjectsSnapshotsGetIamPolicyCall struct { - s *Service - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy -// set. -func (r *ProjectsSnapshotsService) GetIamPolicy(resource string) *ProjectsSnapshotsGetIamPolicyCall { - c := &ProjectsSnapshotsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ProjectsSnapshotsGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsSnapshotsGetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsSnapshotsGetIamPolicyCall) IfNoneMatch(entityTag string) *ProjectsSnapshotsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSnapshotsGetIamPolicyCall) Context(ctx context.Context) *ProjectsSnapshotsGetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsSnapshotsGetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.snapshots.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsSnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - // "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:getIamPolicy", - // "httpMethod": "GET", - // "id": "pubsub.projects.snapshots.getIamPolicy", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/snapshots/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:getIamPolicy", - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.snapshots.setIamPolicy": - -type ProjectsSnapshotsSetIamPolicyCall struct { - s *Service - resource string - setiampolicyrequest *SetIamPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -func (r *ProjectsSnapshotsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsSnapshotsSetIamPolicyCall { - c := &ProjectsSnapshotsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.setiampolicyrequest = setiampolicyrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSnapshotsSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsSnapshotsSetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSnapshotsSetIamPolicyCall) Context(ctx context.Context) *ProjectsSnapshotsSetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
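For orientation, a minimal sketch of driving the GetIamPolicy/SetIamPolicy pair of this generated client from caller code; the credentials helper, project name, and snapshot name are assumptions for illustration, not taken from this file.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	pubsub "google.golang.org/api/pubsub/v1"
)

func main() {
	ctx := context.Background()

	// Assumed: application default credentials carrying the Pub/Sub scope.
	httpClient, err := google.DefaultClient(ctx, pubsub.PubsubScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := pubsub.New(httpClient)
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical snapshot resource name, for illustration only.
	resource := "projects/my-project/snapshots/my-snapshot"

	// GetIamPolicy returns an empty policy if none has been set.
	policy, err := svc.Projects.Snapshots.GetIamPolicy(resource).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("fetched existing IAM policy")

	// SetIamPolicy replaces whatever policy exists; writing the same policy
	// back is effectively a no-op.
	if _, err := svc.Projects.Snapshots.SetIamPolicy(resource, &pubsub.SetIamPolicyRequest{
		Policy: policy,
	}).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}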
-func (c *ProjectsSnapshotsSetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.snapshots.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsSnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - // "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:setIamPolicy", - // "httpMethod": "POST", - // "id": "pubsub.projects.snapshots.setIamPolicy", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/snapshots/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:setIamPolicy", - // "request": { - // "$ref": "SetIamPolicyRequest" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.snapshots.testIamPermissions": - -type ProjectsSnapshotsTestIamPermissionsCall struct { - s *Service - resource string - testiampermissionsrequest *TestIamPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-// If the resource does not exist, this will return an empty set -// of -// permissions, not a NOT_FOUND error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. -func (r *ProjectsSnapshotsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsSnapshotsTestIamPermissionsCall { - c := &ProjectsSnapshotsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.testiampermissionsrequest = testiampermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSnapshotsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsSnapshotsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSnapshotsTestIamPermissionsCall) Context(ctx context.Context) *ProjectsSnapshotsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsSnapshotsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.snapshots.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsSnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestIamPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", - // "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}:testIamPermissions", - // "httpMethod": "POST", - // "id": "pubsub.projects.snapshots.testIamPermissions", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/snapshots/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:testIamPermissions", - // "request": { - // "$ref": "TestIamPermissionsRequest" - // }, - // "response": { - // "$ref": "TestIamPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.subscriptions.acknowledge": - -type ProjectsSubscriptionsAcknowledgeCall struct { - s *Service - subscription string - acknowledgerequest *AcknowledgeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Acknowledge: Acknowledges the messages associated with the `ack_ids` -// in the -// `AcknowledgeRequest`. The Pub/Sub system can remove the relevant -// messages -// from the subscription. -// -// Acknowledging a message whose ack deadline has expired may -// succeed, -// but such a message may be redelivered later. Acknowledging a message -// more -// than once will not result in an error. -func (r *ProjectsSubscriptionsService) Acknowledge(subscription string, acknowledgerequest *AcknowledgeRequest) *ProjectsSubscriptionsAcknowledgeCall { - c := &ProjectsSubscriptionsAcknowledgeCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.subscription = subscription - c.acknowledgerequest = acknowledgerequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSubscriptionsAcknowledgeCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsAcknowledgeCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
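Alongside the Acknowledge call above, a short sketch of the pull-then-ack loop it belongs to, assuming a *pubsub.Service built as in the earlier sketch; the subscription name is a placeholder, and the AckIds field name on AcknowledgeRequest is inferred from the ack_ids wire name rather than quoted from this file.

package main

import (
	"encoding/base64"
	"fmt"
	"log"

	pubsub "google.golang.org/api/pubsub/v1"
)

// pullAndAck pulls a small batch of messages and acknowledges them.
func pullAndAck(svc *pubsub.Service) {
	sub := "projects/my-project/subscriptions/my-sub" // placeholder name

	// The server may wait briefly for messages when the backlog is empty.
	resp, err := svc.Projects.Subscriptions.Pull(sub, &pubsub.PullRequest{
		MaxMessages:       10,
		ReturnImmediately: false,
	}).Do()
	if err != nil {
		log.Fatal(err)
	}

	ackIDs := make([]string, 0, len(resp.ReceivedMessages))
	for _, rm := range resp.ReceivedMessages {
		// The generated client carries the payload base64-encoded in Data.
		payload, _ := base64.StdEncoding.DecodeString(rm.Message.Data)
		fmt.Printf("message %s: %s\n", rm.Message.MessageId, payload)
		ackIDs = append(ackIDs, rm.AckId)
	}

	if len(ackIDs) == 0 {
		return
	}
	// Acknowledging a message more than once is not an error.
	if _, err := svc.Projects.Subscriptions.Acknowledge(sub, &pubsub.AcknowledgeRequest{
		AckIds: ackIDs, // field name assumed from the ack_ids proto field
	}).Do(); err != nil {
		log.Fatal(err)
	}
}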
-func (c *ProjectsSubscriptionsAcknowledgeCall) Context(ctx context.Context) *ProjectsSubscriptionsAcknowledgeCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsSubscriptionsAcknowledgeCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSubscriptionsAcknowledgeCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.acknowledgerequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}:acknowledge") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "subscription": c.subscription, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.subscriptions.acknowledge" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsSubscriptionsAcknowledgeCall) Do(opts ...googleapi.CallOption) (*Empty, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Empty{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Acknowledges the messages associated with the `ack_ids` in the\n`AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages\nfrom the subscription.\n\nAcknowledging a message whose ack deadline has expired may succeed,\nbut such a message may be redelivered later. 
Acknowledging a message more\nthan once will not result in an error.", - // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:acknowledge", - // "httpMethod": "POST", - // "id": "pubsub.projects.subscriptions.acknowledge", - // "parameterOrder": [ - // "subscription" - // ], - // "parameters": { - // "subscription": { - // "description": "The subscription whose message is being acknowledged.\nFormat is `projects/{project}/subscriptions/{sub}`.", - // "location": "path", - // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+subscription}:acknowledge", - // "request": { - // "$ref": "AcknowledgeRequest" - // }, - // "response": { - // "$ref": "Empty" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.subscriptions.create": - -type ProjectsSubscriptionsCreateCall struct { - s *Service - name string - subscription *Subscription - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Creates a subscription to a given topic. -// If the subscription already exists, returns `ALREADY_EXISTS`. -// If the corresponding topic doesn't exist, returns `NOT_FOUND`. -// -// If the name is not provided in the request, the server will assign a -// random -// name for this subscription on the same project as the topic, -// conforming -// to the -// [resource name -// format](https://cloud.google.com/pubsub/docs/overview#names). -// The generated name is populated in the returned Subscription -// object. -// Note that for REST API requests, you must specify a name in the -// request. -func (r *ProjectsSubscriptionsService) Create(name string, subscription *Subscription) *ProjectsSubscriptionsCreateCall { - c := &ProjectsSubscriptionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.subscription = subscription - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSubscriptionsCreateCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSubscriptionsCreateCall) Context(ctx context.Context) *ProjectsSubscriptionsCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsSubscriptionsCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSubscriptionsCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.subscription) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.subscriptions.create" call. -// Exactly one of *Subscription or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Subscription.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsSubscriptionsCreateCall) Do(opts ...googleapi.CallOption) (*Subscription, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Subscription{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a subscription to a given topic.\nIf the subscription already exists, returns `ALREADY_EXISTS`.\nIf the corresponding topic doesn't exist, returns `NOT_FOUND`.\n\nIf the name is not provided in the request, the server will assign a random\nname for this subscription on the same project as the topic, conforming\nto the\n[resource name format](https://cloud.google.com/pubsub/docs/overview#names).\nThe generated name is populated in the returned Subscription object.\nNote that for REST API requests, you must specify a name in the request.", - // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", - // "httpMethod": "PUT", - // "id": "pubsub.projects.subscriptions.create", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "The name of the subscription. It must have the format\n`\"projects/{project}/subscriptions/{subscription}\"`. `{subscription}` must\nstart with a letter, and contain only letters (`[A-Za-z]`), numbers\n(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),\nplus (`+`) or percent signs (`%`). It must be between 3 and 255 characters\nin length, and it must not start with `\"goog\"`.", - // "location": "path", - // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}", - // "request": { - // "$ref": "Subscription" - // }, - // "response": { - // "$ref": "Subscription" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.subscriptions.delete": - -type ProjectsSubscriptionsDeleteCall struct { - s *Service - subscription string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes an existing subscription. All messages retained in -// the subscription -// are immediately dropped. Calls to `Pull` after deletion will -// return -// `NOT_FOUND`. 
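A brief sketch of invoking the Create call above from caller code; the project, topic, and push endpoint names are placeholders, and an empty PushConfig would instead leave the subscription in pull mode.

package main

import (
	"log"

	pubsub "google.golang.org/api/pubsub/v1"
)

// createPushSubscription sketches the Create call: the name must follow
// "projects/{project}/subscriptions/{subscription}", and a non-empty
// PushConfig makes this a push subscription.
func createPushSubscription(svc *pubsub.Service) (*pubsub.Subscription, error) {
	name := "projects/my-project/subscriptions/my-sub" // placeholder name
	sub := &pubsub.Subscription{
		Topic:              "projects/my-project/topics/my-topic", // placeholder topic
		AckDeadlineSeconds: 30,
		PushConfig: &pubsub.PushConfig{
			PushEndpoint: "https://example.com/push",
		},
	}
	created, err := svc.Projects.Subscriptions.Create(name, sub).Do()
	if err != nil {
		log.Printf("create failed: %v", err)
		return nil, err
	}
	return created, nil
}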
After a subscription is deleted, a new one may be -// created with -// the same name, but the new one has no association with the -// old -// subscription or its topic unless the same topic is specified. -func (r *ProjectsSubscriptionsService) Delete(subscription string) *ProjectsSubscriptionsDeleteCall { - c := &ProjectsSubscriptionsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.subscription = subscription - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSubscriptionsDeleteCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSubscriptionsDeleteCall) Context(ctx context.Context) *ProjectsSubscriptionsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsSubscriptionsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSubscriptionsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "subscription": c.subscription, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.subscriptions.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsSubscriptionsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Empty{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes an existing subscription. All messages retained in the subscription\nare immediately dropped. Calls to `Pull` after deletion will return\n`NOT_FOUND`. 
After a subscription is deleted, a new one may be created with\nthe same name, but the new one has no association with the old\nsubscription or its topic unless the same topic is specified.", - // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", - // "httpMethod": "DELETE", - // "id": "pubsub.projects.subscriptions.delete", - // "parameterOrder": [ - // "subscription" - // ], - // "parameters": { - // "subscription": { - // "description": "The subscription to delete.\nFormat is `projects/{project}/subscriptions/{sub}`.", - // "location": "path", - // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+subscription}", - // "response": { - // "$ref": "Empty" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.subscriptions.get": - -type ProjectsSubscriptionsGetCall struct { - s *Service - subscription string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Gets the configuration details of a subscription. -func (r *ProjectsSubscriptionsService) Get(subscription string) *ProjectsSubscriptionsGetCall { - c := &ProjectsSubscriptionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.subscription = subscription - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSubscriptionsGetCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsSubscriptionsGetCall) IfNoneMatch(entityTag string) *ProjectsSubscriptionsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSubscriptionsGetCall) Context(ctx context.Context) *ProjectsSubscriptionsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsSubscriptionsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSubscriptionsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "subscription": c.subscription, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.subscriptions.get" call. 
-// Exactly one of *Subscription or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Subscription.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsSubscriptionsGetCall) Do(opts ...googleapi.CallOption) (*Subscription, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Subscription{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the configuration details of a subscription.", - // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", - // "httpMethod": "GET", - // "id": "pubsub.projects.subscriptions.get", - // "parameterOrder": [ - // "subscription" - // ], - // "parameters": { - // "subscription": { - // "description": "The name of the subscription to get.\nFormat is `projects/{project}/subscriptions/{sub}`.", - // "location": "path", - // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+subscription}", - // "response": { - // "$ref": "Subscription" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.subscriptions.getIamPolicy": - -type ProjectsSubscriptionsGetIamPolicyCall struct { - s *Service - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy -// set. -func (r *ProjectsSubscriptionsService) GetIamPolicy(resource string) *ProjectsSubscriptionsGetIamPolicyCall { - c := &ProjectsSubscriptionsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSubscriptionsGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsGetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsSubscriptionsGetIamPolicyCall) IfNoneMatch(entityTag string) *ProjectsSubscriptionsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. 
Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSubscriptionsGetIamPolicyCall) Context(ctx context.Context) *ProjectsSubscriptionsGetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsSubscriptionsGetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSubscriptionsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.subscriptions.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsSubscriptionsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:getIamPolicy", - // "httpMethod": "GET", - // "id": "pubsub.projects.subscriptions.getIamPolicy", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:getIamPolicy", - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.subscriptions.list": - -type ProjectsSubscriptionsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists matching subscriptions. -func (r *ProjectsSubscriptionsService) List(project string) *ProjectsSubscriptionsListCall { - c := &ProjectsSubscriptionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// PageSize sets the optional parameter "pageSize": Maximum number of -// subscriptions to return. -func (c *ProjectsSubscriptionsListCall) PageSize(pageSize int64) *ProjectsSubscriptionsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": The value returned -// by the last `ListSubscriptionsResponse`; indicates that -// this is a continuation of a prior `ListSubscriptions` call, and that -// the -// system should return the next page of data. -func (c *ProjectsSubscriptionsListCall) PageToken(pageToken string) *ProjectsSubscriptionsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSubscriptionsListCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *ProjectsSubscriptionsListCall) IfNoneMatch(entityTag string) *ProjectsSubscriptionsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSubscriptionsListCall) Context(ctx context.Context) *ProjectsSubscriptionsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsSubscriptionsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSubscriptionsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+project}/subscriptions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.subscriptions.list" call. -// Exactly one of *ListSubscriptionsResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *ListSubscriptionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsSubscriptionsListCall) Do(opts ...googleapi.CallOption) (*ListSubscriptionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListSubscriptionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists matching subscriptions.", - // "flatPath": "v1/projects/{projectsId}/subscriptions", - // "httpMethod": "GET", - // "id": "pubsub.projects.subscriptions.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "pageSize": { - // "description": "Maximum number of subscriptions to return.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "The value returned by the last `ListSubscriptionsResponse`; indicates that\nthis is a continuation of a prior `ListSubscriptions` call, and that the\nsystem should return the next page of data.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "The name of the cloud project that subscriptions belong to.\nFormat is `projects/{project}`.", - // "location": "path", - // "pattern": "^projects/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+project}/subscriptions", - // "response": { - // "$ref": "ListSubscriptionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsSubscriptionsListCall) Pages(ctx context.Context, f func(*ListSubscriptionsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "pubsub.projects.subscriptions.modifyAckDeadline": - -type ProjectsSubscriptionsModifyAckDeadlineCall struct { - s *Service - subscription string - modifyackdeadlinerequest *ModifyAckDeadlineRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// ModifyAckDeadline: Modifies the ack deadline for a specific message. -// This method is useful -// to indicate that more time is needed to process a message by -// the -// subscriber, or to make the message available for redelivery if -// the -// processing was interrupted. Note that this does not modify -// the -// subscription-level `ackDeadlineSeconds` used for subsequent messages. 
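A short sketch of walking all subscriptions with the List/Pages helpers above; the project name is a placeholder, and the Subscriptions field on ListSubscriptionsResponse is assumed from the API shape rather than quoted from this file.

package main

import (
	"fmt"

	"golang.org/x/net/context"
	pubsub "google.golang.org/api/pubsub/v1"
)

// listSubscriptions sketches the List/Pages helpers: Pages drives the
// pageToken loop itself and invokes the callback once per page.
func listSubscriptions(ctx context.Context, svc *pubsub.Service) error {
	project := "projects/my-project" // placeholder project name
	return svc.Projects.Subscriptions.List(project).
		PageSize(50).
		Pages(ctx, func(page *pubsub.ListSubscriptionsResponse) error {
			for _, s := range page.Subscriptions {
				fmt.Println(s.Name)
			}
			return nil
		})
}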
-func (r *ProjectsSubscriptionsService) ModifyAckDeadline(subscription string, modifyackdeadlinerequest *ModifyAckDeadlineRequest) *ProjectsSubscriptionsModifyAckDeadlineCall { - c := &ProjectsSubscriptionsModifyAckDeadlineCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.subscription = subscription - c.modifyackdeadlinerequest = modifyackdeadlinerequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSubscriptionsModifyAckDeadlineCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsModifyAckDeadlineCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSubscriptionsModifyAckDeadlineCall) Context(ctx context.Context) *ProjectsSubscriptionsModifyAckDeadlineCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsSubscriptionsModifyAckDeadlineCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSubscriptionsModifyAckDeadlineCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifyackdeadlinerequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}:modifyAckDeadline") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "subscription": c.subscription, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.subscriptions.modifyAckDeadline" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsSubscriptionsModifyAckDeadlineCall) Do(opts ...googleapi.CallOption) (*Empty, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Empty{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Modifies the ack deadline for a specific message. 
This method is useful\nto indicate that more time is needed to process a message by the\nsubscriber, or to make the message available for redelivery if the\nprocessing was interrupted. Note that this does not modify the\nsubscription-level `ackDeadlineSeconds` used for subsequent messages.", - // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyAckDeadline", - // "httpMethod": "POST", - // "id": "pubsub.projects.subscriptions.modifyAckDeadline", - // "parameterOrder": [ - // "subscription" - // ], - // "parameters": { - // "subscription": { - // "description": "The name of the subscription.\nFormat is `projects/{project}/subscriptions/{sub}`.", - // "location": "path", - // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+subscription}:modifyAckDeadline", - // "request": { - // "$ref": "ModifyAckDeadlineRequest" - // }, - // "response": { - // "$ref": "Empty" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.subscriptions.modifyPushConfig": - -type ProjectsSubscriptionsModifyPushConfigCall struct { - s *Service - subscription string - modifypushconfigrequest *ModifyPushConfigRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// ModifyPushConfig: Modifies the `PushConfig` for a specified -// subscription. -// -// This may be used to change a push subscription to a pull one -// (signified by -// an empty `PushConfig`) or vice versa, or change the endpoint URL and -// other -// attributes of a push subscription. Messages will accumulate for -// delivery -// continuously through the call regardless of changes to the -// `PushConfig`. -func (r *ProjectsSubscriptionsService) ModifyPushConfig(subscription string, modifypushconfigrequest *ModifyPushConfigRequest) *ProjectsSubscriptionsModifyPushConfigCall { - c := &ProjectsSubscriptionsModifyPushConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.subscription = subscription - c.modifypushconfigrequest = modifypushconfigrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSubscriptionsModifyPushConfigCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsModifyPushConfigCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSubscriptionsModifyPushConfigCall) Context(ctx context.Context) *ProjectsSubscriptionsModifyPushConfigCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ProjectsSubscriptionsModifyPushConfigCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSubscriptionsModifyPushConfigCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifypushconfigrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}:modifyPushConfig") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "subscription": c.subscription, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.subscriptions.modifyPushConfig" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsSubscriptionsModifyPushConfigCall) Do(opts ...googleapi.CallOption) (*Empty, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Empty{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Modifies the `PushConfig` for a specified subscription.\n\nThis may be used to change a push subscription to a pull one (signified by\nan empty `PushConfig`) or vice versa, or change the endpoint URL and other\nattributes of a push subscription. 
Messages will accumulate for delivery\ncontinuously through the call regardless of changes to the `PushConfig`.", - // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:modifyPushConfig", - // "httpMethod": "POST", - // "id": "pubsub.projects.subscriptions.modifyPushConfig", - // "parameterOrder": [ - // "subscription" - // ], - // "parameters": { - // "subscription": { - // "description": "The name of the subscription.\nFormat is `projects/{project}/subscriptions/{sub}`.", - // "location": "path", - // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+subscription}:modifyPushConfig", - // "request": { - // "$ref": "ModifyPushConfigRequest" - // }, - // "response": { - // "$ref": "Empty" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.subscriptions.pull": - -type ProjectsSubscriptionsPullCall struct { - s *Service - subscription string - pullrequest *PullRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Pull: Pulls messages from the server. Returns an empty list if there -// are no -// messages available in the backlog. The server may return -// `UNAVAILABLE` if -// there are too many concurrent pull requests pending for the -// given -// subscription. -func (r *ProjectsSubscriptionsService) Pull(subscription string, pullrequest *PullRequest) *ProjectsSubscriptionsPullCall { - c := &ProjectsSubscriptionsPullCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.subscription = subscription - c.pullrequest = pullrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSubscriptionsPullCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsPullCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSubscriptionsPullCall) Context(ctx context.Context) *ProjectsSubscriptionsPullCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsSubscriptionsPullCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSubscriptionsPullCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.pullrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+subscription}:pull") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "subscription": c.subscription, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.subscriptions.pull" call. -// Exactly one of *PullResponse or error will be non-nil. 
Any non-2xx -// status code is an error. Response headers are in either -// *PullResponse.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsSubscriptionsPullCall) Do(opts ...googleapi.CallOption) (*PullResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &PullResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Pulls messages from the server. Returns an empty list if there are no\nmessages available in the backlog. The server may return `UNAVAILABLE` if\nthere are too many concurrent pull requests pending for the given\nsubscription.", - // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:pull", - // "httpMethod": "POST", - // "id": "pubsub.projects.subscriptions.pull", - // "parameterOrder": [ - // "subscription" - // ], - // "parameters": { - // "subscription": { - // "description": "The subscription from which messages should be pulled.\nFormat is `projects/{project}/subscriptions/{sub}`.", - // "location": "path", - // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+subscription}:pull", - // "request": { - // "$ref": "PullRequest" - // }, - // "response": { - // "$ref": "PullResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.subscriptions.setIamPolicy": - -type ProjectsSubscriptionsSetIamPolicyCall struct { - s *Service - resource string - setiampolicyrequest *SetIamPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -func (r *ProjectsSubscriptionsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsSubscriptionsSetIamPolicyCall { - c := &ProjectsSubscriptionsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.setiampolicyrequest = setiampolicyrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSubscriptionsSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsSetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *ProjectsSubscriptionsSetIamPolicyCall) Context(ctx context.Context) *ProjectsSubscriptionsSetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsSubscriptionsSetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSubscriptionsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.subscriptions.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsSubscriptionsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the access control policy on the specified resource. 
Replaces any\nexisting policy.", - // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:setIamPolicy", - // "httpMethod": "POST", - // "id": "pubsub.projects.subscriptions.setIamPolicy", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:setIamPolicy", - // "request": { - // "$ref": "SetIamPolicyRequest" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.subscriptions.testIamPermissions": - -type ProjectsSubscriptionsTestIamPermissionsCall struct { - s *Service - resource string - testiampermissionsrequest *TestIamPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a NOT_FOUND error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. -func (r *ProjectsSubscriptionsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsSubscriptionsTestIamPermissionsCall { - c := &ProjectsSubscriptionsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.testiampermissionsrequest = testiampermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsSubscriptionsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsSubscriptionsTestIamPermissionsCall) Context(ctx context.Context) *ProjectsSubscriptionsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsSubscriptionsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsSubscriptionsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.subscriptions.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsSubscriptionsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestIamPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", - // "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}:testIamPermissions", - // "httpMethod": "POST", - // "id": "pubsub.projects.subscriptions.testIamPermissions", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/subscriptions/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:testIamPermissions", - // "request": { - // "$ref": "TestIamPermissionsRequest" - // }, - // "response": { - // "$ref": "TestIamPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.topics.create": - -type ProjectsTopicsCreateCall struct { - s *Service - name string - topic *Topic - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Creates the given topic with the given name. -func (r *ProjectsTopicsService) Create(name string, topic *Topic) *ProjectsTopicsCreateCall { - c := &ProjectsTopicsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.topic = topic - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ProjectsTopicsCreateCall) Fields(s ...googleapi.Field) *ProjectsTopicsCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsTopicsCreateCall) Context(ctx context.Context) *ProjectsTopicsCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsTopicsCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsTopicsCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.topic) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.topics.create" call. -// Exactly one of *Topic or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Topic.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsTopicsCreateCall) Do(opts ...googleapi.CallOption) (*Topic, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Topic{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates the given topic with the given name.", - // "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", - // "httpMethod": "PUT", - // "id": "pubsub.projects.topics.create", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "The name of the topic. It must have the format\n`\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter,\nand contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),\nunderscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent\nsigns (`%`). 
It must be between 3 and 255 characters in length, and it\nmust not start with `\"goog\"`.", - // "location": "path", - // "pattern": "^projects/[^/]+/topics/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}", - // "request": { - // "$ref": "Topic" - // }, - // "response": { - // "$ref": "Topic" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.topics.delete": - -type ProjectsTopicsDeleteCall struct { - s *Service - topic string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the topic with the given name. Returns `NOT_FOUND` if -// the topic -// does not exist. After a topic is deleted, a new topic may be created -// with -// the same name; this is an entirely new topic with none of the -// old -// configuration or subscriptions. Existing subscriptions to this topic -// are -// not deleted, but their `topic` field is set to `_deleted-topic_`. -func (r *ProjectsTopicsService) Delete(topic string) *ProjectsTopicsDeleteCall { - c := &ProjectsTopicsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.topic = topic - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsTopicsDeleteCall) Fields(s ...googleapi.Field) *ProjectsTopicsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsTopicsDeleteCall) Context(ctx context.Context) *ProjectsTopicsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsTopicsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsTopicsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+topic}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "topic": c.topic, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.topics.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsTopicsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Empty{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the topic with the given name. Returns `NOT_FOUND` if the topic\ndoes not exist. After a topic is deleted, a new topic may be created with\nthe same name; this is an entirely new topic with none of the old\nconfiguration or subscriptions. Existing subscriptions to this topic are\nnot deleted, but their `topic` field is set to `_deleted-topic_`.", - // "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", - // "httpMethod": "DELETE", - // "id": "pubsub.projects.topics.delete", - // "parameterOrder": [ - // "topic" - // ], - // "parameters": { - // "topic": { - // "description": "Name of the topic to delete.\nFormat is `projects/{project}/topics/{topic}`.", - // "location": "path", - // "pattern": "^projects/[^/]+/topics/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+topic}", - // "response": { - // "$ref": "Empty" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.topics.get": - -type ProjectsTopicsGetCall struct { - s *Service - topic string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Gets the configuration of a topic. -func (r *ProjectsTopicsService) Get(topic string) *ProjectsTopicsGetCall { - c := &ProjectsTopicsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.topic = topic - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsTopicsGetCall) Fields(s ...googleapi.Field) *ProjectsTopicsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsTopicsGetCall) IfNoneMatch(entityTag string) *ProjectsTopicsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsTopicsGetCall) Context(ctx context.Context) *ProjectsTopicsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ProjectsTopicsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsTopicsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+topic}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "topic": c.topic, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.topics.get" call. -// Exactly one of *Topic or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Topic.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsTopicsGetCall) Do(opts ...googleapi.CallOption) (*Topic, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Topic{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the configuration of a topic.", - // "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", - // "httpMethod": "GET", - // "id": "pubsub.projects.topics.get", - // "parameterOrder": [ - // "topic" - // ], - // "parameters": { - // "topic": { - // "description": "The name of the topic to get.\nFormat is `projects/{project}/topics/{topic}`.", - // "location": "path", - // "pattern": "^projects/[^/]+/topics/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+topic}", - // "response": { - // "$ref": "Topic" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.topics.getIamPolicy": - -type ProjectsTopicsGetIamPolicyCall struct { - s *Service - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy -// set. -func (r *ProjectsTopicsService) GetIamPolicy(resource string) *ProjectsTopicsGetIamPolicyCall { - c := &ProjectsTopicsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ProjectsTopicsGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsTopicsGetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsTopicsGetIamPolicyCall) IfNoneMatch(entityTag string) *ProjectsTopicsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsTopicsGetIamPolicyCall) Context(ctx context.Context) *ProjectsTopicsGetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsTopicsGetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsTopicsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.topics.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsTopicsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - // "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:getIamPolicy", - // "httpMethod": "GET", - // "id": "pubsub.projects.topics.getIamPolicy", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/topics/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:getIamPolicy", - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.topics.list": - -type ProjectsTopicsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists matching topics. -func (r *ProjectsTopicsService) List(project string) *ProjectsTopicsListCall { - c := &ProjectsTopicsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// PageSize sets the optional parameter "pageSize": Maximum number of -// topics to return. -func (c *ProjectsTopicsListCall) PageSize(pageSize int64) *ProjectsTopicsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": The value returned -// by the last `ListTopicsResponse`; indicates that this is -// a continuation of a prior `ListTopics` call, and that the system -// should -// return the next page of data. -func (c *ProjectsTopicsListCall) PageToken(pageToken string) *ProjectsTopicsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsTopicsListCall) Fields(s ...googleapi.Field) *ProjectsTopicsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsTopicsListCall) IfNoneMatch(entityTag string) *ProjectsTopicsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. 
Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsTopicsListCall) Context(ctx context.Context) *ProjectsTopicsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsTopicsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsTopicsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+project}/topics") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.topics.list" call. -// Exactly one of *ListTopicsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListTopicsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsTopicsListCall) Do(opts ...googleapi.CallOption) (*ListTopicsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListTopicsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists matching topics.", - // "flatPath": "v1/projects/{projectsId}/topics", - // "httpMethod": "GET", - // "id": "pubsub.projects.topics.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "pageSize": { - // "description": "Maximum number of topics to return.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "The value returned by the last `ListTopicsResponse`; indicates that this is\na continuation of a prior `ListTopics` call, and that the system should\nreturn the next page of data.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "The name of the cloud project that topics belong to.\nFormat is `projects/{project}`.", - // "location": "path", - // "pattern": "^projects/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+project}/topics", - // "response": { - // "$ref": "ListTopicsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// Pages invokes f for 
each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsTopicsListCall) Pages(ctx context.Context, f func(*ListTopicsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "pubsub.projects.topics.publish": - -type ProjectsTopicsPublishCall struct { - s *Service - topic string - publishrequest *PublishRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Publish: Adds one or more messages to the topic. Returns `NOT_FOUND` -// if the topic -// does not exist. The message payload must not be empty; it must -// contain -// either a non-empty data field, or at least one attribute. -func (r *ProjectsTopicsService) Publish(topic string, publishrequest *PublishRequest) *ProjectsTopicsPublishCall { - c := &ProjectsTopicsPublishCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.topic = topic - c.publishrequest = publishrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsTopicsPublishCall) Fields(s ...googleapi.Field) *ProjectsTopicsPublishCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsTopicsPublishCall) Context(ctx context.Context) *ProjectsTopicsPublishCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsTopicsPublishCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsTopicsPublishCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.publishrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+topic}:publish") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "topic": c.topic, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.topics.publish" call. -// Exactly one of *PublishResponse or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *PublishResponse.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsTopicsPublishCall) Do(opts ...googleapi.CallOption) (*PublishResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &PublishResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic\ndoes not exist. The message payload must not be empty; it must contain\n either a non-empty data field, or at least one attribute.", - // "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:publish", - // "httpMethod": "POST", - // "id": "pubsub.projects.topics.publish", - // "parameterOrder": [ - // "topic" - // ], - // "parameters": { - // "topic": { - // "description": "The messages in the request will be published on this topic.\nFormat is `projects/{project}/topics/{topic}`.", - // "location": "path", - // "pattern": "^projects/[^/]+/topics/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+topic}:publish", - // "request": { - // "$ref": "PublishRequest" - // }, - // "response": { - // "$ref": "PublishResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.topics.setIamPolicy": - -type ProjectsTopicsSetIamPolicyCall struct { - s *Service - resource string - setiampolicyrequest *SetIamPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -func (r *ProjectsTopicsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsTopicsSetIamPolicyCall { - c := &ProjectsTopicsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.setiampolicyrequest = setiampolicyrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsTopicsSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsTopicsSetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsTopicsSetIamPolicyCall) Context(ctx context.Context) *ProjectsTopicsSetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ProjectsTopicsSetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsTopicsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.topics.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsTopicsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - // "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:setIamPolicy", - // "httpMethod": "POST", - // "id": "pubsub.projects.topics.setIamPolicy", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/topics/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:setIamPolicy", - // "request": { - // "$ref": "SetIamPolicyRequest" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.topics.testIamPermissions": - -type ProjectsTopicsTestIamPermissionsCall struct { - s *Service - resource string - testiampermissionsrequest *TestIamPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-// If the resource does not exist, this will return an empty set -// of -// permissions, not a NOT_FOUND error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. -func (r *ProjectsTopicsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsTopicsTestIamPermissionsCall { - c := &ProjectsTopicsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.testiampermissionsrequest = testiampermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsTopicsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsTopicsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsTopicsTestIamPermissionsCall) Context(ctx context.Context) *ProjectsTopicsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsTopicsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsTopicsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.topics.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsTopicsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestIamPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", - // "flatPath": "v1/projects/{projectsId}/topics/{topicsId}:testIamPermissions", - // "httpMethod": "POST", - // "id": "pubsub.projects.topics.testIamPermissions", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/topics/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:testIamPermissions", - // "request": { - // "$ref": "TestIamPermissionsRequest" - // }, - // "response": { - // "$ref": "TestIamPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// method id "pubsub.projects.topics.subscriptions.list": - -type ProjectsTopicsSubscriptionsListCall struct { - s *Service - topic string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists the name of the subscriptions for this topic. -func (r *ProjectsTopicsSubscriptionsService) List(topic string) *ProjectsTopicsSubscriptionsListCall { - c := &ProjectsTopicsSubscriptionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.topic = topic - return c -} - -// PageSize sets the optional parameter "pageSize": Maximum number of -// subscription names to return. -func (c *ProjectsTopicsSubscriptionsListCall) PageSize(pageSize int64) *ProjectsTopicsSubscriptionsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": The value returned -// by the last `ListTopicSubscriptionsResponse`; indicates -// that this is a continuation of a prior `ListTopicSubscriptions` call, -// and -// that the system should return the next page of data. -func (c *ProjectsTopicsSubscriptionsListCall) PageToken(pageToken string) *ProjectsTopicsSubscriptionsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ProjectsTopicsSubscriptionsListCall) Fields(s ...googleapi.Field) *ProjectsTopicsSubscriptionsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsTopicsSubscriptionsListCall) IfNoneMatch(entityTag string) *ProjectsTopicsSubscriptionsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsTopicsSubscriptionsListCall) Context(ctx context.Context) *ProjectsTopicsSubscriptionsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsTopicsSubscriptionsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsTopicsSubscriptionsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+topic}/subscriptions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "topic": c.topic, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "pubsub.projects.topics.subscriptions.list" call. -// Exactly one of *ListTopicSubscriptionsResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ListTopicSubscriptionsResponse.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsTopicsSubscriptionsListCall) Do(opts ...googleapi.CallOption) (*ListTopicSubscriptionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListTopicSubscriptionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists the name of the subscriptions for this topic.", - // "flatPath": "v1/projects/{projectsId}/topics/{topicsId}/subscriptions", - // "httpMethod": "GET", - // "id": "pubsub.projects.topics.subscriptions.list", - // "parameterOrder": [ - // "topic" - // ], - // "parameters": { - // "pageSize": { - // "description": "Maximum number of subscription names to return.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "The value returned by the last `ListTopicSubscriptionsResponse`; indicates\nthat this is a continuation of a prior `ListTopicSubscriptions` call, and\nthat the system should return the next page of data.", - // "location": "query", - // "type": "string" - // }, - // "topic": { - // "description": "The name of the topic that subscriptions are attached to.\nFormat is `projects/{project}/topics/{topic}`.", - // "location": "path", - // "pattern": "^projects/[^/]+/topics/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+topic}/subscriptions", - // "response": { - // "$ref": "ListTopicSubscriptionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/pubsub" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *ProjectsTopicsSubscriptionsListCall) Pages(ctx context.Context, f func(*ListTopicSubscriptionsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} From 762c73b79934fc8eca8214e6fe69f1d0b2bf9dad Mon Sep 17 00:00:00 2001 From: Matthew Wong Date: Wed, 27 Sep 2017 18:44:10 -0400 Subject: [PATCH 15/68] Add tag->push for openebs --- Makefile | 2 +- deploy.sh | 11 ++++++++++- openebs/Makefile | 3 ++- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 058631b7eee..069c07ddac3 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ all: aws/efs ceph/cephfs ceph/rbd flex gluster/block gluster/glusterfs iscsi/targetd local-volume/provisioner local-volume/bootstrapper nfs-client nfs snapshot openstack/standalone-cinder .PHONY: all -clean: clean-aws/efs clean-ceph/cephfs clean-ceph/rbd clean-flex clean-gluster/block clean-gluster/glusterfs clean-iscsi/targetd clean-local-volume/provisioner clean-local-volume/bootstrapper clean-nfs-client clean-nfs clean-snapshot clean-openstack/standalone-cinder +clean: clean-aws/efs clean-ceph/cephfs clean-ceph/rbd clean-flex clean-gluster/block clean-gluster/glusterfs clean-iscsi/targetd clean-local-volume/provisioner clean-local-volume/bootstrapper clean-nfs-client clean-nfs clean-openebs clean-snapshot clean-openstack/standalone-cinder .PHONY: clean test: test-aws/efs test-local-volume/provisioner test-nfs test-snapshot diff --git a/deploy.sh b/deploy.sh index b5a93a73a73..640f969ee89 100755 --- a/deploy.sh +++ b/deploy.sh @@ -32,6 +32,7 @@ local-volume-provisioner-bootstrap local-volume-provisioner nfs-client-provisioner nfs-provisioner +openebs-provisioner rbd-provisioner ) @@ -43,7 +44,15 @@ if [[ "${TRAVIS_TAG}" =~ $regex ]]; then export REGISTRY=quay.io/kubernetes_incubator/ fi echo "Pushing image '${PROVISIONER}' with tags '${VERSION}' and 'latest' to '${REGISTRY}'." - make push-"${PROVISIONER}" + if [[ "${PROVISIONER}" = openebs-provisioner ]]; then + export DIMAGE="${REGISTRY}openebs-provisioner" + export DNAME="${QUAY_USERNAME}" + export DPASS="${QUAY_PASSWORD}" + pushd openebs; make; popd + make deploy-openebs-provisioner + else + make push-"${PROVISIONER}" + fi else echo "Nothing to deploy" fi diff --git a/openebs/Makefile b/openebs/Makefile index 4ad0eaca693..5a1945b4098 100644 --- a/openebs/Makefile +++ b/openebs/Makefile @@ -31,4 +31,5 @@ deploy: clean: rm -rf vendor - rm openebs-provisioner + rm -f openebs-provisioner + rm -f buildscripts/docker/openebs-provisioner From f6c37ec03b89e93a6dae8861f3a82dc3e75ff1be Mon Sep 17 00:00:00 2001 From: Kenjiro Nakayama Date: Wed, 27 Sep 2017 13:32:55 +0000 Subject: [PATCH 16/68] iscsi: clean up codes --- iscsi/targetd/cmd/root.go | 8 +- iscsi/targetd/cmd/start.go | 28 +------ .../targetd/provisioner/iscsi-provisioner.go | 80 +++---------------- 3 files changed, 20 insertions(+), 96 deletions(-) diff --git a/iscsi/targetd/cmd/root.go b/iscsi/targetd/cmd/root.go index 006c9d6b660..a9c358eff95 100644 --- a/iscsi/targetd/cmd/root.go +++ b/iscsi/targetd/cmd/root.go @@ -43,10 +43,6 @@ func Execute() { func init() { cobra.OnInitialize(initConfig) - // Here you will define your flags and configuration settings. - // Cobra supports Persistent Flags, which, if defined here, - // will be global for your application. 
- RootCmd.PersistentFlags().String("log-level", "info", "log level") viper.BindPFlag("log-level", RootCmd.PersistentFlags().Lookup("log-level")) @@ -55,5 +51,7 @@ func init() { // initConfig reads in config file and ENV variables if set. func initConfig() { viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) - viper.AutomaticEnv() // read in environment variables that match + + // read in environment variables that match + viper.AutomaticEnv() } diff --git a/iscsi/targetd/cmd/start.go b/iscsi/targetd/cmd/start.go index 6d2c4c22032..a90f29f964a 100644 --- a/iscsi/targetd/cmd/start.go +++ b/iscsi/targetd/cmd/start.go @@ -35,13 +35,8 @@ var log = logrus.New() // start-controllerCmd represents the start-controller command var startcontrollerCmd = &cobra.Command{ Use: "start", - Short: "A brief description of your command", - Long: `A longer description that spans multiple lines and likely contains examples -and usage of using your command. For example: - -Cobra is a CLI library for Go that empowers applications. -This application is a tool to generate the needed files -to quickly create a Cobra application.`, + Short: "Start an iscsi dynamic provisioner", + Long: `Start an iscsi dynamic provisioner`, Run: func(cmd *cobra.Command, args []string) { initLog() log.Debugln("start called") @@ -63,7 +58,7 @@ to quickly create a Cobra application.`, "config-host": config.Host, }).Debugln("kube client config created") - // creates the clientset + // creates the clientset log.Debugln("creating kube client set") kubernetesClientSet, err := kubernetes.NewForConfig(config) if err != nil { @@ -78,18 +73,14 @@ to quickly create a Cobra application.`, log.Fatalf("Error getting server version: %v", err) } - //url := viper.GetString("targetd-scheme") + "://" + viper.GetString("targetd-username") + ":" + viper.GetString("targetd-password") + "@" + viper.GetString("targetd-address") + ":" + viper.GetInt("targetd-port") - url := fmt.Sprintf("%s://%s:%s@%s:%d/targetrpc", viper.GetString("targetd-scheme"), viper.GetString("targetd-username"), viper.GetString("targetd-password"), viper.GetString("targetd-address"), viper.GetInt("targetd-port")) log.Debugln("targed URL", url) iscsiProvisioner := provisioner.NewiscsiProvisioner(url) log.Debugln("iscsi provisioner created") + pc := controller.NewProvisionController(kubernetesClientSet, viper.GetString("provisioner-name"), iscsiProvisioner, serverVersion.GitVersion) - // pc := controller.NewProvisionController(kubernetesClientSet, viper.GetDuration("resync-period"), viper.GetString("provisioner-name"), iscsiProvisioner, serverVersion.GitVersion, - // viper.GetBool("exponential-backoff-on-error"), viper.GetInt("fail-retry-threshold"), viper.GetDuration("lease-period"), - // viper.GetDuration("renew-deadline"), viper.GetDuration("retry-priod"), viper.GetDuration("term-limit")) controller.ResyncPeriod(viper.GetDuration("resync-period")) controller.ExponentialBackOffOnError(viper.GetBool("exponential-backoff-on-error")) controller.FailedProvisionThreshold(viper.GetInt("fail-retry-threshold")) @@ -133,21 +124,10 @@ func init() { viper.BindPFlag("targetd-port", startcontrollerCmd.Flags().Lookup("targetd-port")) startcontrollerCmd.Flags().String("default-fs", "xfs", "filesystem to use when not specified") viper.BindPFlag("default-fs", startcontrollerCmd.Flags().Lookup("default-fs")) - startcontrollerCmd.Flags().String("master", "", "Master URL") viper.BindPFlag("master", startcontrollerCmd.Flags().Lookup("master")) startcontrollerCmd.Flags().String("kubeconfig", "", 
"Absolute path to the kubeconfig") viper.BindPFlag("kubeconfig", startcontrollerCmd.Flags().Lookup("kubeconfig")) - // Here you will define your flags and configuration settings. - - // Cobra supports Persistent Flags which will work for this command - // and all subcommands, e.g.: - // start-controllerCmd.PersistentFlags().String("foo", "", "A help for foo") - - // Cobra supports local flags which will only run when this command - // is called directly, e.g.: - // start-controllerCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") - } func initLog() { diff --git a/iscsi/targetd/provisioner/iscsi-provisioner.go b/iscsi/targetd/provisioner/iscsi-provisioner.go index 18613384075..064c340f879 100644 --- a/iscsi/targetd/provisioner/iscsi-provisioner.go +++ b/iscsi/targetd/provisioner/iscsi-provisioner.go @@ -19,17 +19,14 @@ package provisioner import ( "errors" "fmt" - //"net/rpc" - //"net/rpc/jsonrpc" "sort" "strconv" "strings" "github.com/Sirupsen/logrus" - "github.com/powerman/rpc-codec/jsonrpc2" - //"github.com/kubernetes-incubator/external-storage/iscsi/targetd/provisioner/jsonrpc2" "github.com/kubernetes-incubator/external-storage/lib/controller" "github.com/kubernetes-incubator/external-storage/lib/util" + "github.com/powerman/rpc-codec/jsonrpc2" "github.com/spf13/viper" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -76,8 +73,6 @@ type export struct { type exportList []export -type result int - // NewiscsiProvisioner creates new iscsi provisioner func NewiscsiProvisioner(url string) controller.Provisioner { @@ -113,14 +108,6 @@ func (p *iscsiProvisioner) Provision(options controller.VolumeOptions) (*v1.Pers annotations["volume_name"] = vol annotations["pool"] = pool annotations["initiators"] = options.Parameters["initiators"] - // annotations[annExportBlock] = exportBlock - // annotations[annExportID] = strconv.FormatUint(uint64(exportID), 10) - // annotations[annProjectBlock] = projectBlock - // annotations[annProjectID] = strconv.FormatUint(uint64(projectID), 10) - // if supGroup != 0 { - // annotations[VolumeGidAnnotationKey] = strconv.FormatUint(supGroup, 10) - // } - // annotations[annProvisionerID] = string(p.identity) pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ @@ -254,9 +241,8 @@ func (p *iscsiProvisioner) getInitiators(options controller.VolumeOptions) []str return strings.Split(options.Parameters["initiators"], ",") } +// getFirstAvailableLun gets first available Lun. func getFirstAvailableLun(exportList exportList) (int32, error) { - log.Debug("export List: ", exportList) - sort.Sort(exportList) log.Debug("sorted export List: ", exportList) //this is sloppy way to remove duplicates @@ -283,21 +269,18 @@ func getFirstAvailableLun(exportList exportList) (int32, error) { sluns = luns[0:] sort.Sort(sluns) log.Debug("sorted lun list: ", sluns) - lun := int32(-1) + + lun := int32(len(sluns)) for i, clun := range sluns { if i < int(clun) { lun = int32(i) break } } - if lun == -1 { - lun = int32(len(sluns)) - } return lun, nil - //return 0, nil } -////// json rpc operations //// +// volDestroy removes calls vol_destroy targetd API to remove volume. 
 func (p *iscsiProvisioner) volDestroy(vol string, pool string) error {
 	client, err := p.getConnection()
 	defer client.Close()
@@ -305,98 +288,75 @@ func (p *iscsiProvisioner) volDestroy(vol string, pool string) error {
 		log.Warnln(err)
 		return err
 	}
-
-	//make arguments object
 	args := volDestroyArgs{
 		Pool: pool,
 		Name: vol,
 	}
-	//this will store returned result
-	var result1 result
-	//call remote procedure with args
-	err = client.Call("vol_destroy", args, &result1)
+	err = client.Call("vol_destroy", args, nil)
 	return err
 }
 
+// exportDestroy calls the export_destroy targetd API to remove an export of a volume.
 func (p *iscsiProvisioner) exportDestroy(vol string, pool string, initiator string) error {
-
 	client, err := p.getConnection()
 	defer client.Close()
 	if err != nil {
 		log.Warnln(err)
 		return err
 	}
-
-	//make arguments object
 	args := exportDestroyArgs{
 		Pool:         pool,
 		Vol:          vol,
 		InitiatorWwn: initiator,
 	}
-	//this will store returned result
-	var result1 result
-	//call remote procedure with args
-	err = client.Call("export_destroy", args, &result1)
+	err = client.Call("export_destroy", args, nil)
 	return err
 }
 
+// volCreate calls the vol_create targetd API to create a volume.
 func (p *iscsiProvisioner) volCreate(name string, size int64, pool string) error {
-
 	client, err := p.getConnection()
 	defer client.Close()
 	if err != nil {
 		log.Warnln(err)
 		return err
 	}
-
-	//make arguments object
 	args := volCreateArgs{
 		Pool: pool,
 		Name: name,
 		Size: size,
 	}
-	//this will store returned result
-	var result1 result
-	//call remote procedure with args
-	err = client.Call("vol_create", args, &result1)
+	err = client.Call("vol_create", args, nil)
 	return err
 }
 
+// exportCreate calls the export_create targetd API to create an export of a volume.
 func (p *iscsiProvisioner) exportCreate(vol string, lun int32, pool string, initiator string) error {
-
 	client, err := p.getConnection()
 	defer client.Close()
 	if err != nil {
 		log.Warnln(err)
 		return err
 	}
-
-	//make arguments object
 	args := exportCreateArgs{
 		Pool:         pool,
 		Vol:          vol,
 		InitiatorWwn: initiator,
 		Lun:          lun,
 	}
-	//this will store returned result
-	var result1 result
-	//call remote procedure with args
-	err = client.Call("export_create", args, &result1)
+	err = client.Call("export_create", args, nil)
 	return err
 }
 
+// exportList calls the export_list targetd API to get export objects.
func (p *iscsiProvisioner) exportList() (exportList, error) { - client, err := p.getConnection() defer client.Close() if err != nil { log.Warnln(err) return nil, err } - - //this will store returned result var result1 exportList - //call remote procedure with args err = client.Call("export_list", nil, &result1) return result1, err } @@ -417,7 +377,6 @@ func (p *iscsiProvisioner) getConnection() (*jsonrpc2.Client, error) { log.Debugln("opening connection to targetd: ", p.targetdURL) client := jsonrpc2.NewHTTPClient(p.targetdURL) - if client == nil { log.Warnln("error creating the connection to targetd", p.targetdURL) return nil, errors.New("error creating the connection to targetd") @@ -425,16 +384,3 @@ func (p *iscsiProvisioner) getConnection() (*jsonrpc2.Client, error) { log.Debugln("targetd client created") return client, nil } - -//func (p *iscsiProvisioner) getConnection2() (*rpc.Client, error) { -// log.Debugln("opening connection to targetd: ", p.targetdURL) -// -// client, err := jsonrpc.Dial("tcp", p.targetdURL) -// -// if err != nil { -// log.Warnln(err) -// return nil, err -// } -// log.Debugln("targetd client created") -// return client, nil -//} From cc5714cc6498593d697bea6396631495e94ccd1c Mon Sep 17 00:00:00 2001 From: Serguei Bezverkhi Date: Thu, 7 Sep 2017 13:44:37 -0400 Subject: [PATCH 17/68] [local-volume] Refactor local volume configMap for YAML Closes #343 --- local-volume/bootstrapper/README.md | 23 ++-- local-volume/bootstrapper/bootstrapper.go | 48 ++++---- .../kubernetes/config-gce-local-ssd.yaml | 8 +- .../deployment/kubernetes/example-config.yaml | 24 ++-- local-volume/provisioner/cmd/main.go | 24 ++-- local-volume/provisioner/pkg/common/common.go | 115 +++++++++++------- 6 files changed, 132 insertions(+), 110 deletions(-) diff --git a/local-volume/bootstrapper/README.md b/local-volume/bootstrapper/README.md index 880d4169eaa..f3ba24164dd 100644 --- a/local-volume/bootstrapper/README.md +++ b/local-volume/bootstrapper/README.md @@ -50,20 +50,15 @@ metadata: name: local-volume-config namespace: kube-system data: - "local-fast": | - { - "hostDir": "/mnt/ssds", - "mountDir": "/local-ssds" - } - "local-slow": | - { - "hostDir": "/mnt/hdds", - "mountDir": "/local-hdds" - } - "local-storage": | - { - "hostDir": "/mnt/disks" - } + storageClassMap: | + local-fast: + hostDir: "/mnt/ssds" + mountDir: "/local-ssds" + local-slow: + hostDir: "/mnt/hdds" + mountDir: "/local-hdds" + local-storage: + hostDir: "/mnt/disks" ``` ### Command line options diff --git a/local-volume/bootstrapper/bootstrapper.go b/local-volume/bootstrapper/bootstrapper.go index 9115210c010..4ca94ef269e 100644 --- a/local-volume/bootstrapper/bootstrapper.go +++ b/local-volume/bootstrapper/bootstrapper.go @@ -58,6 +58,8 @@ var ( serviceAccountName = flag.String("serviceaccount", defaultServiceAccountName, "Name of the service accout for local volume provisioner") ) +var provisionerConfig common.ProvisionerConfiguration + // setupClient creates an in cluster kubernetes client. func setupClient() *kubernetes.Clientset { config, err := rest.InClusterConfig() @@ -90,35 +92,35 @@ func generateMountDir(mount *common.MountConfig) string { // ensureVolumeConfig reads volume configurations from given configmap, and create a default one // if not already exist. 
-func ensureVolumeConfig(client *kubernetes.Clientset, namespace string) (map[string]common.MountConfig, error) { +func ensureVolumeConfig(client *kubernetes.Clientset, namespace string, config *common.ProvisionerConfiguration) error { // Get config map from user or from a default configmap (if created before). - config, err := common.GetVolumeConfigFromConfigMap(client, namespace, *volumeConfigName) + err := common.GetVolumeConfigFromConfigMap(client, namespace, *volumeConfigName, config) if err != nil && *volumeConfigName != defaultVolumeConfigName { // If configmap is provided by user but we have problem getting it, fail fast. - return nil, fmt.Errorf("could not get config map: %v", err) + return fmt.Errorf("could not get config map: %v", err) } else if err != nil && errors.IsNotFound(err) { // configmap is not provided by user and default configmap doesn't exist, create one. glog.Infof("No config is given, creating default configmap %v", *volumeConfigName) - config = common.GetDefaultVolumeConfig() + common.GetDefaultVolumeConfig(config) if err = createConfigMap(client, namespace, config); err != nil { - return nil, fmt.Errorf("unable to create configmap: %v", err) + return fmt.Errorf("unable to create configmap: %v", err) } } else if err != nil { // error exists, it might be that default configmap is damanged, fail fast. - return nil, fmt.Errorf("could not get default config map: %v", err) + return fmt.Errorf("could not get default config map: %v", err) } - return config, nil + return nil } // ensureMountDir checks if each storageclass's mount config has 'MountDir' set; if not, it will // automatically generate one and informs an update on configmap is required. -func ensureMountDir(config map[string]common.MountConfig) bool { +func ensureMountDir(config *common.ProvisionerConfiguration) bool { needsUpdate := false - for class, mount := range config { + for class, mount := range config.StorageClassConfig { if mount.MountDir == "" { newMoutConfig := mount newMoutConfig.MountDir = generateMountDir(&mount) - config[class] = newMoutConfig + config.StorageClassConfig[class] = newMoutConfig needsUpdate = true } } @@ -126,7 +128,7 @@ func ensureMountDir(config map[string]common.MountConfig) bool { } // updateConfigMap ... -func updateConfigMap(client *kubernetes.Clientset, namespace string, config map[string]common.MountConfig) error { +func updateConfigMap(client *kubernetes.Clientset, namespace string, config *common.ProvisionerConfiguration) error { var err error configMap, err := client.CoreV1().ConfigMaps(namespace).Get(*volumeConfigName, metav1.GetOptions{}) if err != nil { @@ -141,7 +143,7 @@ func updateConfigMap(client *kubernetes.Clientset, namespace string, config map[ } // createConfigMap ... -func createConfigMap(client *kubernetes.Clientset, namespace string, config map[string]common.MountConfig) error { +func createConfigMap(client *kubernetes.Clientset, namespace string, config *common.ProvisionerConfiguration) error { data, err := common.VolumeConfigToConfigMapData(config) if err != nil { glog.Fatalf("Unable to convert volume config to configmap %v\n", err) @@ -232,10 +234,10 @@ func createClusterRoleBinding(client *kubernetes.Clientset, namespace string) er } // createDaemonSet ... 
-func createDaemonSet(client *kubernetes.Clientset, namespace string, config map[string]common.MountConfig) error { +func createDaemonSet(client *kubernetes.Clientset, namespace string, config *common.ProvisionerConfiguration) error { volumes := []v1.Volume{} volumeMounts := []v1.VolumeMount{} - for _, mount := range config { + for _, mount := range config.StorageClassConfig { name := generateMountName(&mount) volumes = append(volumes, v1.Volume{ Name: name, @@ -323,7 +325,6 @@ func createDaemonSet(client *kubernetes.Clientset, namespace string, config map[ }, }, } - _, err := client.Extensions().DaemonSets(namespace).Create(&daemonSet) return err } @@ -340,18 +341,19 @@ func main() { client := setupClient() var err error - var config map[string]common.MountConfig - if config, err = ensureVolumeConfig(client, namespace); err != nil { + + provisionerConfig = common.ProvisionerConfiguration{ + StorageClassConfig: make(map[string]common.MountConfig), + } + if err = ensureVolumeConfig(client, namespace, &provisionerConfig); err != nil { glog.Fatalf("Unable to ensure volume config: %v", err) } - - if ensureMountDir(config) { - if err = updateConfigMap(client, namespace, config); err != nil { + if ensureMountDir(&provisionerConfig) { + if err = updateConfigMap(client, namespace, &provisionerConfig); err != nil { glog.Fatalf("Could not update config map to use generated mountdir: %v", err) } } - - glog.Infof("Running bootstrap pod with config %v: %+v\n", *volumeConfigName, config) + glog.Infof("Running bootstrap pod with config %v: %+v\n", *volumeConfigName, provisionerConfig) if err = createServiceAccount(client, namespace); err != nil && !errors.IsAlreadyExists(err) { glog.Fatalf("Unable to create service account: %v\n", err) @@ -359,7 +361,7 @@ func main() { if err = createClusterRoleBinding(client, namespace); err != nil && !errors.IsAlreadyExists(err) { glog.Fatalf("Unable to create cluster role bindings: %v\n", err) } - if err = createDaemonSet(client, namespace, config); err != nil { + if err = createDaemonSet(client, namespace, &provisionerConfig); err != nil { glog.Fatalf("Unable to create daemonset: %v\n", err) } diff --git a/local-volume/bootstrapper/deployment/kubernetes/config-gce-local-ssd.yaml b/local-volume/bootstrapper/deployment/kubernetes/config-gce-local-ssd.yaml index f561ce9f94b..dab1d676465 100644 --- a/local-volume/bootstrapper/deployment/kubernetes/config-gce-local-ssd.yaml +++ b/local-volume/bootstrapper/deployment/kubernetes/config-gce-local-ssd.yaml @@ -1,11 +1,11 @@ # The config map is used to configure local volume discovery for Local SSDs on GCE and GKE. # It is a map from storage class to its mount configuration. +apiVersion: v1 kind: ConfigMap metadata: name: local-volume-config namespace: kube-system data: - "local-storage": | - { - "hostDir": "/mnt/disks" - } + storageClassMap: | + local-storage: + hostDir: "/mnt/disks" diff --git a/local-volume/bootstrapper/deployment/kubernetes/example-config.yaml b/local-volume/bootstrapper/deployment/kubernetes/example-config.yaml index 0128ef3af29..1be63662856 100644 --- a/local-volume/bootstrapper/deployment/kubernetes/example-config.yaml +++ b/local-volume/bootstrapper/deployment/kubernetes/example-config.yaml @@ -1,21 +1,17 @@ # The config map is used to configure local volume discovery; it is a map from # storage class to its mount configuration. 
+apiVersion: v1 kind: ConfigMap metadata: name: local-volume-config namespace: kube-system data: - "local-fast": | - { - "hostDir": "/mnt/ssds", - "mountDir": "/local-ssds" - } - "local-slow": | - { - "hostDir": "/mnt/hdds", - "mountDir": "/local-hdds" - } - "local-storage": | - { - "hostDir": "/mnt/disks" - } + storageClassMap: | + local-fast: + hostDir: "/mnt/ssds" + mountDir: "/local-ssds" + local-slow: + hostDir: "/mnt/hdds" + mountDir: "/local-hdds" + local-storage: + hostDir: "/mnt/disks" diff --git a/local-volume/provisioner/cmd/main.go b/local-volume/provisioner/cmd/main.go index d1a8da06ea3..41b125f93f6 100644 --- a/local-volume/provisioner/cmd/main.go +++ b/local-volume/provisioner/cmd/main.go @@ -30,6 +30,18 @@ import ( "k8s.io/client-go/rest" ) +var provisionerConfig common.ProvisionerConfiguration + +func init() { + provisionerConfig = common.ProvisionerConfiguration{ + StorageClassConfig: make(map[string]common.MountConfig), + } + if err := common.LoadProvisionerConfigs(&provisionerConfig); err != nil { + glog.Fatalf("Error parsing Provisioner's configuration: %#v. Exiting...\n", err) + } + glog.Infof("Configuration parsing has been completed, ready to run...") +} + func setupClient() *kubernetes.Clientset { config, err := rest.InClusterConfig() if err != nil { @@ -58,7 +70,7 @@ func main() { glog.Info("Starting controller\n") controller.StartLocalController(client, &common.UserConfig{ Node: node, - DiscoveryMap: createDiscoveryMap(client), + DiscoveryMap: provisionerConfig.StorageClassConfig, }) } @@ -69,13 +81,3 @@ func getNode(client *kubernetes.Clientset, name string) *v1.Node { } return node } - -func createDiscoveryMap(client *kubernetes.Clientset) map[string]common.MountConfig { - config, err := common.GetVolumeConfigFromMountedConfigMap() - if err != nil { - glog.Infof("Could not get config map due to: %v, using default configmap", err) - config = common.GetDefaultVolumeConfig() - } - glog.Infof("Running provisioner with config %+v\n", config) - return config -} diff --git a/local-volume/provisioner/pkg/common/common.go b/local-volume/provisioner/pkg/common/common.go index bdb16b1ddb7..8f2e3409591 100644 --- a/local-volume/provisioner/pkg/common/common.go +++ b/local-volume/provisioner/pkg/common/common.go @@ -17,11 +17,12 @@ limitations under the License. package common import ( - "encoding/json" "fmt" "io/ioutil" "path" + "strings" + "github.com/ghodss/yaml" "github.com/golang/glog" "github.com/kubernetes-incubator/external-storage/local-volume/provisioner/pkg/cache" "github.com/kubernetes-incubator/external-storage/local-volume/provisioner/pkg/util" @@ -54,6 +55,9 @@ const ( EventVolumeFailedDelete = "VolumeFailedDelete" // ProvisionerConfigPath points to the path inside of the provisioner container where configMap volume is mounted ProvisionerConfigPath = "/etc/provisioner/config/" + // ProvisonerStorageClassConfig defines file name of the file which stores storage class + // configuration. The file name must match to the key name used in configuration map. 
+ ProvisonerStorageClassConfig = "storageClassMap" ) // UserConfig stores all the user-defined parameters to the provisioner @@ -67,9 +71,9 @@ type UserConfig struct { // MountConfig stores a configuration for discoverying a specific storageclass type MountConfig struct { // The hostpath directory - HostDir string `json:"hostDir"` + HostDir string `json:"hostDir" yaml:"hostDir"` // The mount point of the hostpath volume - MountDir string `json:"mountDir"` + MountDir string `json:"mountDir" yaml:"mountDir"` } // RuntimeConfig stores all the objects that the provisioner needs to run @@ -99,6 +103,15 @@ type LocalPVConfig struct { AffinityAnn string } +// ProvisionerConfiguration defines Provisioner configuration objects +// Each configuration key of the struct e.g StorageClassConfig is individually +// marshaled in VolumeConfigToConfigMapData. +// TODO Need to find a way to marshal the struct more efficiently. +type ProvisionerConfiguration struct { + // StorageClassConfig defines configuration of Provisioner's storage classes + StorageClassConfig map[string]MountConfig `json:"storageClassMap" yaml:"storageClassMap"` +} + // CreateLocalPVSpec returns a PV spec that can be used for PV creation func CreateLocalPVSpec(config *LocalPVConfig) *v1.PersistentVolume { return &v1.PersistentVolume{ @@ -127,38 +140,19 @@ func CreateLocalPVSpec(config *LocalPVConfig) *v1.PersistentVolume { } } -// GetVolumeConfigFromMountedConfigMap gets volume configuration from the mounted configmap volume, -func GetVolumeConfigFromMountedConfigMap() (map[string]MountConfig, error) { - storageClassMapping := make(map[string]string) - files, err := ioutil.ReadDir(ProvisionerConfigPath) - if err != nil { - return nil, err - } - for _, file := range files { - if !file.IsDir() { - fileContents, err := ioutil.ReadFile(path.Join(ProvisionerConfigPath, file.Name())) - if err != nil { - glog.Infof("Could not read file: %s due to: %v", path.Join(ProvisionerConfigPath, file.Name()), err) - continue - } - storageClassMapping[file.Name()] = string(fileContents) - } - } - return ConfigMapDataToVolumeConfig(storageClassMapping) -} - // GetVolumeConfigFromConfigMap gets volume configuration from given configmap, -func GetVolumeConfigFromConfigMap(client *kubernetes.Clientset, namespace, name string) (map[string]MountConfig, error) { +func GetVolumeConfigFromConfigMap(client *kubernetes.Clientset, namespace, name string, provisionerConfig *ProvisionerConfiguration) error { configMap, err := client.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{}) if err != nil { - return nil, err + return err } - return ConfigMapDataToVolumeConfig(configMap.Data) + err = ConfigMapDataToVolumeConfig(configMap.Data, provisionerConfig) + return err } // GetDefaultVolumeConfig returns the default volume configuration. -func GetDefaultVolumeConfig() map[string]MountConfig { - return map[string]MountConfig{ +func GetDefaultVolumeConfig(provisionerConfig *ProvisionerConfiguration) { + provisionerConfig.StorageClassConfig = map[string]MountConfig{ "local-storage": { HostDir: DefaultHostDir, MountDir: DefaultMountDir, @@ -167,28 +161,61 @@ func GetDefaultVolumeConfig() map[string]MountConfig { } // VolumeConfigToConfigMapData converts volume config to configmap data. 
-func VolumeConfigToConfigMapData(config map[string]MountConfig) (map[string]string, error) {
+func VolumeConfigToConfigMapData(config *ProvisionerConfiguration) (map[string]string, error) {
 	configMapData := make(map[string]string)
-	for class, data := range config {
-		var val []byte
-		var err error
-		if val, err = json.Marshal(data); err != nil {
-			return nil, fmt.Errorf("unable to unmarshal config for class %v: %v", class, err)
-		}
-		configMapData[class] = string(val)
+	val, err := yaml.Marshal(config.StorageClassConfig)
+	if err != nil {
+		return nil, fmt.Errorf("unable to Marshal volume config: %v", err)
 	}
+	configMapData[ProvisonerStorageClassConfig] = string(val)
 	return configMapData, nil
 }
 
 // ConfigMapDataToVolumeConfig converts configmap data to volume config
-func ConfigMapDataToVolumeConfig(data map[string]string) (map[string]MountConfig, error) {
-	mountConfig := make(map[string]MountConfig)
-	for class, val := range data {
-		config := MountConfig{}
-		if err := json.Unmarshal([]byte(val), &config); err != nil {
-			return nil, fmt.Errorf("unable to unmarshal config for class %v: %v", class, err)
+func ConfigMapDataToVolumeConfig(data map[string]string, provisionerConfig *ProvisionerConfiguration) error {
+	rawYaml := ""
+	for key, val := range data {
+		rawYaml += key
+		rawYaml += ": \n"
+		rawYaml += insertSpaces(string(val))
+	}
+	if err := yaml.Unmarshal([]byte(rawYaml), provisionerConfig); err != nil {
+		return fmt.Errorf("fail to Unmarshal yaml due to: %#v", err)
+	}
+	return nil
+}
+
+func insertSpaces(original string) string {
+	spaced := ""
+	for _, line := range strings.Split(original, "\n") {
+		spaced += "  "
+		spaced += line
+		spaced += "\n"
+	}
+	return spaced
+}
+
+// LoadProvisionerConfigs loads all configuration into a string and unmarshals it into a ProvisionerConfiguration struct.
+// The configuration is stored in the configmap which is mounted as a volume.
+func LoadProvisionerConfigs(provisionerConfig *ProvisionerConfiguration) error {
+	files, err := ioutil.ReadDir(ProvisionerConfigPath)
+	if err != nil {
+		return err
+	}
+	data := make(map[string]string)
+	for _, file := range files {
+		if !file.IsDir() {
+			fileContents, err := ioutil.ReadFile(path.Join(ProvisionerConfigPath, file.Name()))
+			if err != nil {
+				// TODO Currently errors in reading configuration keys/files are ignored. This is due to
+				// the presence of the "..data" file, a symbolic link which gets created when kubelet mounts a
+				// configmap inside of a container. It needs to be revisited later for a safer solution, if ignoring read errors
+				// proves to be causing issues.
+ glog.Infof("Could not read file: %s due to: %v", path.Join(ProvisionerConfigPath, file.Name()), err) + continue + } + data[file.Name()] = string(fileContents) } - mountConfig[class] = config } - return mountConfig, nil + return ConfigMapDataToVolumeConfig(data, provisionerConfig) } From 0a387d4a648c211e962464e5e95d4bf10bdd60f0 Mon Sep 17 00:00:00 2001 From: Ian Chakeres Date: Tue, 26 Sep 2017 12:14:36 -0700 Subject: [PATCH 18/68] Revise based upon feedback --- local-volume/README.md | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/local-volume/README.md b/local-volume/README.md index ad5dbce0af9..14f8f7f599c 100644 --- a/local-volume/README.md +++ b/local-volume/README.md @@ -224,6 +224,12 @@ go run hack/e2e.go -- -v --test --test_args="--ginkgo.focus=\[Feature:LocalPersi [GCE Alpha](https://k8s-testgrid.appspot.com/sig-storage#gce-alpha) [GCE GCI Alpha](https://k8s-testgrid.appspot.com/sig-storage#gci-gce-alpha) + +## Requirements + +* The local-volume plugin expects paths to be stable, including across + reboots and when disks are added or removed. + ## Best Practices * For IO isolation, a whole disk per volume is recommended @@ -231,17 +237,10 @@ go run hack/e2e.go -- -v --test --test_args="--ginkgo.focus=\[Feature:LocalPersi * Avoid recreating nodes with the same node name while there are still old PVs with that node's affinity specified. Otherwise, the system could think that the new node contains the old PVs. -* While leveraging the local PV provisioner, race conditions can occur when - an admin is adding new volumes and they are being discovered. To avoid - this situation, in the discovery directories we recommend utilizing bind - mounts for mount points and symlinks for raw block volumes. For example, - if the discovery directory is /mnt/volumes and we want the provisioner - to expose /dev/sda1 as a volume named vol1, then we would bind mount - /dev/sda1 at /mnt/volumes/vol1. -* For volumes with a filesystem, its recommended to utilize their UUID (e.g. - the output from `ls -l /dev/disk/by-uuid`) both in fstab mounting logic and - in the directory name representing that volume. This practice ensures - that the wrong local volume is not mistakenly mounted, even if its mountpoint +* For volumes with a filesystem, it's recommended to utilize their UUID (e.g. + the output from `ls -l /dev/disk/by-uuid`) both in fstab entries + and in the directory name of that mount point. This practice ensures + that the wrong local volume is not mistakenly mounted, even if its device path changes (e.g. if /dev/sda1 becomes /dev/sdb1 when a new disk is added). 
   Additionally, this practice will ensure that if another node with the
   same name is created, that any volumes on that node are unique and not

From 550bd8ed7f721bf7b0eb695ddbf4a298905c2a79 Mon Sep 17 00:00:00 2001
From: Adam Litke
Date: Mon, 2 Oct 2017 10:22:03 -0400
Subject: [PATCH 19/68] standalone-cinder doc and interface exporting

Address review comments:
- Document test enabling interfaces
- Since they are used only by this package for testing, unexport them
- Add TODO to check access mode when provisioning

Signed-off-by: Adam Litke
---
 .../pkg/provisioner/clusterbroker.go          |  1 +
 .../pkg/provisioner/mapper.go                 |  6 ++
 .../pkg/provisioner/provisioner.go            | 22 ++++---
 .../pkg/provisioner/provisioner_test.go       | 66 +++++++++----------
 .../pkg/provisioner/vsbroker.go               | 29 ++++----
 5 files changed, 67 insertions(+), 57 deletions(-)

diff --git a/openstack/standalone-cinder/pkg/provisioner/clusterbroker.go b/openstack/standalone-cinder/pkg/provisioner/clusterbroker.go
index 46bbdf38a97..abcd5443b8e 100644
--- a/openstack/standalone-cinder/pkg/provisioner/clusterbroker.go
+++ b/openstack/standalone-cinder/pkg/provisioner/clusterbroker.go
@@ -21,6 +21,7 @@ import (
 	"k8s.io/api/core/v1"
 )
 
+// clusterBroker provides a mechanism for tests to override calls to kubernetes with mocks.
 type clusterBroker interface {
 	createSecret(p *cinderProvisioner, ns string, secret *v1.Secret) error
 	deleteSecret(p *cinderProvisioner, ns string, secretName string) error
diff --git a/openstack/standalone-cinder/pkg/provisioner/mapper.go b/openstack/standalone-cinder/pkg/provisioner/mapper.go
index b5aa4467cb8..cb4fb1a345d 100644
--- a/openstack/standalone-cinder/pkg/provisioner/mapper.go
+++ b/openstack/standalone-cinder/pkg/provisioner/mapper.go
@@ -28,8 +28,14 @@ import (
 )
 
 type volumeMapper interface {
+	// BuildPVSource should build a PersistentVolumeSource from cinder connection
+	// information and context from the cluster such as the PVC.
 	BuildPVSource(conn volumeservice.VolumeConnection, options controller.VolumeOptions) (*v1.PersistentVolumeSource, error)
+	// AuthSetup should perform any authentication setup such as secret creation
+	// that would be required before a host can connect to the volume.
 	AuthSetup(p *cinderProvisioner, options controller.VolumeOptions, conn volumeservice.VolumeConnection) error
+	// AuthTeardown should perform any necessary cleanup related to authentication
+	// as the volume is being deleted.
AuthTeardown(p *cinderProvisioner, pv *v1.PersistentVolume) error } diff --git a/openstack/standalone-cinder/pkg/provisioner/provisioner.go b/openstack/standalone-cinder/pkg/provisioner/provisioner.go index ae37810b6ab..4d2a5e13cda 100644 --- a/openstack/standalone-cinder/pkg/provisioner/provisioner.go +++ b/openstack/standalone-cinder/pkg/provisioner/provisioner.go @@ -84,25 +84,27 @@ func (p *cinderProvisioner) Provision(options controller.VolumeOptions) (*v1.Per return nil, fmt.Errorf("claim Selector is not supported") } - volumeID, err := p.vsb.CreateCinderVolume(p.VolumeService, options) + // TODO: Check access mode + + volumeID, err := p.vsb.createCinderVolume(p.VolumeService, options) if err != nil { glog.Errorf("Failed to create volume") goto ERROR } - err = p.vsb.WaitForAvailableCinderVolume(p.VolumeService, volumeID) + err = p.vsb.waitForAvailableCinderVolume(p.VolumeService, volumeID) if err != nil { glog.Errorf("Volume %s did not become available", volumeID) goto ERROR_DELETE } - err = p.vsb.ReserveCinderVolume(p.VolumeService, volumeID) + err = p.vsb.reserveCinderVolume(p.VolumeService, volumeID) if err != nil { glog.Errorf("Failed to reserve volume %s: %v", volumeID, err) goto ERROR_DELETE } - connection, err = p.vsb.ConnectCinderVolume(p.VolumeService, volumeID) + connection, err = p.vsb.connectCinderVolume(p.VolumeService, volumeID) if err != nil { glog.Errorf("Failed to connect volume %s: %v", volumeID, err) goto ERROR_UNRESERVE @@ -128,19 +130,19 @@ func (p *cinderProvisioner) Provision(options controller.VolumeOptions) (*v1.Per return pv, nil ERROR_DISCONNECT: - cleanupErr = p.vsb.DisconnectCinderVolume(p.VolumeService, volumeID) + cleanupErr = p.vsb.disconnectCinderVolume(p.VolumeService, volumeID) if cleanupErr != nil { glog.Errorf("Failed to disconnect volume %s: %v", volumeID, cleanupErr) } glog.V(3).Infof("Volume %s disconnected", volumeID) ERROR_UNRESERVE: - cleanupErr = p.vsb.UnreserveCinderVolume(p.VolumeService, volumeID) + cleanupErr = p.vsb.unreserveCinderVolume(p.VolumeService, volumeID) if cleanupErr != nil { glog.Errorf("Failed to unreserve volume %s: %v", volumeID, cleanupErr) } glog.V(3).Infof("Volume %s unreserved", volumeID) ERROR_DELETE: - cleanupErr = p.vsb.DeleteCinderVolume(p.VolumeService, volumeID) + cleanupErr = p.vsb.deleteCinderVolume(p.VolumeService, volumeID) if cleanupErr != nil { glog.Errorf("Failed to delete volume %s: %v", volumeID, cleanupErr) } @@ -177,20 +179,20 @@ func (p *cinderProvisioner) Delete(pv *v1.PersistentVolume) error { mapper.AuthTeardown(p, pv) - err = p.vsb.DisconnectCinderVolume(p.VolumeService, volumeID) + err = p.vsb.disconnectCinderVolume(p.VolumeService, volumeID) if err != nil { glog.Errorf("Failed to disconnect volume %s: %v", volumeID, err) return err } - err = p.vsb.UnreserveCinderVolume(p.VolumeService, volumeID) + err = p.vsb.unreserveCinderVolume(p.VolumeService, volumeID) if err != nil { // TODO: Create placeholder PV? 
glog.Errorf("Failed to unreserve volume %s: %v", volumeID, err) return err } - err = p.vsb.DeleteCinderVolume(p.VolumeService, volumeID) + err = p.vsb.deleteCinderVolume(p.VolumeService, volumeID) if err != nil { glog.Errorf("Failed to delete volume %s: %v", volumeID, err) return err diff --git a/openstack/standalone-cinder/pkg/provisioner/provisioner_test.go b/openstack/standalone-cinder/pkg/provisioner/provisioner_test.go index e92566cf46e..3da1b361fb9 100644 --- a/openstack/standalone-cinder/pkg/provisioner/provisioner_test.go +++ b/openstack/standalone-cinder/pkg/provisioner/provisioner_test.go @@ -72,7 +72,7 @@ var _ = Describe("Provisioner", func() { Context("when creating a volume fails", func() { BeforeEach(func() { - vsb.mightFail.set("CreateCinderVolume") + vsb.mightFail.set("createCinderVolume") }) It("should fail", func() { Expect(pv).To(BeNil()) @@ -82,8 +82,8 @@ var _ = Describe("Provisioner", func() { Context("when the volume does not become available", func() { BeforeEach(func() { - vsb.mightFail.set("WaitForAvailableCinderVolume") - cleanup = "DeleteCinderVolume." + vsb.mightFail.set("waitForAvailableCinderVolume") + cleanup = "deleteCinderVolume." }) It("should fail and delete the volume", func() { Expect(pv).To(BeNil()) @@ -94,8 +94,8 @@ var _ = Describe("Provisioner", func() { Context("when reserving the volume fails", func() { BeforeEach(func() { - vsb.mightFail.set("ReserveCinderVolume") - cleanup = "DeleteCinderVolume." + vsb.mightFail.set("reserveCinderVolume") + cleanup = "deleteCinderVolume." }) It("should fail and delete the volume", func() { Expect(pv).To(BeNil()) @@ -106,8 +106,8 @@ var _ = Describe("Provisioner", func() { Context("when connecting the volume fails", func() { BeforeEach(func() { - vsb.mightFail.set("ConnectCinderVolume") - cleanup = "UnreserveCinderVolume.DeleteCinderVolume." + vsb.mightFail.set("connectCinderVolume") + cleanup = "unreserveCinderVolume.deleteCinderVolume." }) It("should fail and the volume should be unreserved and deleted", func() { Expect(pv).To(BeNil()) @@ -118,8 +118,8 @@ var _ = Describe("Provisioner", func() { Context("when getting a volumeMapper fails", func() { BeforeEach(func() { - mb.mightFail.set("NewVolumeMapperFromConnection") - cleanup = "DisconnectCinderVolume.UnreserveCinderVolume.DeleteCinderVolume." + mb.mightFail.set("newVolumeMapperFromConnection") + cleanup = "disconnectCinderVolume.unreserveCinderVolume.deleteCinderVolume." }) It("should fail and the volume should be disconnected, unreserved and deleted", func() { Expect(pv).To(BeNil()) @@ -131,7 +131,7 @@ var _ = Describe("Provisioner", func() { Context("when preparing volume authentication fails", func() { BeforeEach(func() { mb.FakeVolumeMapper.mightFail.set("AuthSetup") - cleanup = "DisconnectCinderVolume.UnreserveCinderVolume.DeleteCinderVolume." + cleanup = "disconnectCinderVolume.unreserveCinderVolume.deleteCinderVolume." }) It("should fail and the volume should be disconnected, unreserved and deleted", func() { Expect(pv).To(BeNil()) @@ -142,8 +142,8 @@ var _ = Describe("Provisioner", func() { Context("when building the PV fails", func() { BeforeEach(func() { - mb.mightFail.set("BuildPV") - cleanup = "DisconnectCinderVolume.UnreserveCinderVolume.DeleteCinderVolume." + mb.mightFail.set("buildPV") + cleanup = "disconnectCinderVolume.unreserveCinderVolume.deleteCinderVolume." 
}) It("should fail and the volume should be disconnected, unreserved and deleted", func() { Expect(pv).To(BeNil()) @@ -212,7 +212,7 @@ var _ = Describe("Provisioner", func() { Context("when getting a volumeMapper fails", func() { BeforeEach(func() { - mb.mightFail.set("NewVolumeMapperFromPV") + mb.mightFail.set("newVolumeMapperFromPV") }) It("should fail", func() { Expect(err).To(Not(BeNil())) @@ -230,7 +230,7 @@ var _ = Describe("Provisioner", func() { Context("when disconnecting the volume fails", func() { BeforeEach(func() { - vsb.mightFail.set("DisconnectCinderVolume") + vsb.mightFail.set("disconnectCinderVolume") }) It("should fail", func() { Expect(err).To(Not(BeNil())) @@ -239,7 +239,7 @@ var _ = Describe("Provisioner", func() { Context("when unreserving the volume fails", func() { BeforeEach(func() { - vsb.mightFail.set("UnreserveCinderVolume") + vsb.mightFail.set("unreserveCinderVolume") }) It("should fail", func() { Expect(err).To(Not(BeNil())) @@ -248,7 +248,7 @@ var _ = Describe("Provisioner", func() { Context("when deleting the volume fails", func() { BeforeEach(func() { - vsb.mightFail.set("DeleteCinderVolume") + vsb.mightFail.set("deleteCinderVolume") }) It("should fail", func() { Expect(err).To(Not(BeNil())) @@ -263,23 +263,23 @@ type fakeVolumeServiceBroker struct { volumeServiceBroker } -func (vsb *fakeVolumeServiceBroker) CreateCinderVolume(vs *gophercloud.ServiceClient, options controller.VolumeOptions) (string, error) { - if vsb.mightFail.isSet("CreateCinderVolume") { +func (vsb *fakeVolumeServiceBroker) createCinderVolume(vs *gophercloud.ServiceClient, options controller.VolumeOptions) (string, error) { + if vsb.mightFail.isSet("createCinderVolume") { return "", errors.New("injected error for testing") } return "cinderVolumeID", nil } -func (vsb *fakeVolumeServiceBroker) WaitForAvailableCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { - return vsb.mightFail.ret("WaitForAvailableCinderVolume") +func (vsb *fakeVolumeServiceBroker) waitForAvailableCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { + return vsb.mightFail.ret("waitForAvailableCinderVolume") } -func (vsb *fakeVolumeServiceBroker) ReserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { - return vsb.mightFail.ret("ReserveCinderVolume") +func (vsb *fakeVolumeServiceBroker) reserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { + return vsb.mightFail.ret("reserveCinderVolume") } -func (vsb *fakeVolumeServiceBroker) ConnectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) (volumeservice.VolumeConnection, error) { - if vsb.mightFail.isSet("ConnectCinderVolume") { +func (vsb *fakeVolumeServiceBroker) connectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) (volumeservice.VolumeConnection, error) { + if vsb.mightFail.isSet("connectCinderVolume") { return volumeservice.VolumeConnection{}, errors.New("injected error for testing") } return volumeservice.VolumeConnection{}, nil @@ -294,16 +294,16 @@ func (vsb *fakeVolumeServiceBroker) _cleanupRet(fn string) error { return nil } -func (vsb *fakeVolumeServiceBroker) DisconnectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { - return vsb._cleanupRet("DisconnectCinderVolume") +func (vsb *fakeVolumeServiceBroker) disconnectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { + return vsb._cleanupRet("disconnectCinderVolume") } -func (vsb *fakeVolumeServiceBroker) UnreserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { 
- return vsb._cleanupRet("UnreserveCinderVolume") +func (vsb *fakeVolumeServiceBroker) unreserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { + return vsb._cleanupRet("unreserveCinderVolume") } -func (vsb *fakeVolumeServiceBroker) DeleteCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { - return vsb._cleanupRet("DeleteCinderVolume") +func (vsb *fakeVolumeServiceBroker) deleteCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { + return vsb._cleanupRet("deleteCinderVolume") } type fakeMapperBroker struct { @@ -320,21 +320,21 @@ func newFakeMapperBroker() *fakeMapperBroker { } func (mb *fakeMapperBroker) newVolumeMapperFromConnection(conn volumeservice.VolumeConnection) (volumeMapper, error) { - if mb.mightFail.isSet("NewVolumeMapperFromConnection") { + if mb.mightFail.isSet("newVolumeMapperFromConnection") { return nil, errors.New("injected error for testing") } return mb.FakeVolumeMapper, nil } func (mb *fakeMapperBroker) newVolumeMapperFromPV(pv *v1.PersistentVolume) (volumeMapper, error) { - if mb.mightFail.isSet("NewVolumeMapperFromPV") { + if mb.mightFail.isSet("newVolumeMapperFromPV") { return nil, errors.New("injected error for testing") } return mb.FakeVolumeMapper, nil } func (mb *fakeMapperBroker) buildPV(m volumeMapper, p *cinderProvisioner, options controller.VolumeOptions, conn volumeservice.VolumeConnection, volumeID string) (*v1.PersistentVolume, error) { - if mb.mightFail.isSet("BuildPV") { + if mb.mightFail.isSet("buildPV") { return nil, errors.New("injected error for testing") } return &v1.PersistentVolume{}, nil diff --git a/openstack/standalone-cinder/pkg/provisioner/vsbroker.go b/openstack/standalone-cinder/pkg/provisioner/vsbroker.go index 005c46f8262..23d64c259ec 100644 --- a/openstack/standalone-cinder/pkg/provisioner/vsbroker.go +++ b/openstack/standalone-cinder/pkg/provisioner/vsbroker.go @@ -22,44 +22,45 @@ import ( "github.com/kubernetes-incubator/external-storage/openstack/standalone-cinder/pkg/volumeservice" ) +// volumeServiceBroker provides a mechanism for tests to override calls to cinder with mocks. 
type volumeServiceBroker interface { - CreateCinderVolume(vs *gophercloud.ServiceClient, options controller.VolumeOptions) (string, error) - WaitForAvailableCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error - ReserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error - ConnectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) (volumeservice.VolumeConnection, error) - DisconnectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error - UnreserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error - DeleteCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error + createCinderVolume(vs *gophercloud.ServiceClient, options controller.VolumeOptions) (string, error) + waitForAvailableCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error + reserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error + connectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) (volumeservice.VolumeConnection, error) + disconnectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error + unreserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error + deleteCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error } type gophercloudBroker struct { volumeServiceBroker } -func (vsb *gophercloudBroker) CreateCinderVolume(vs *gophercloud.ServiceClient, options controller.VolumeOptions) (string, error) { +func (vsb *gophercloudBroker) createCinderVolume(vs *gophercloud.ServiceClient, options controller.VolumeOptions) (string, error) { return volumeservice.CreateCinderVolume(vs, options) } -func (vsb *gophercloudBroker) WaitForAvailableCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { +func (vsb *gophercloudBroker) waitForAvailableCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { return volumeservice.WaitForAvailableCinderVolume(vs, volumeID) } -func (vsb *gophercloudBroker) ReserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { +func (vsb *gophercloudBroker) reserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { return volumeservice.ReserveCinderVolume(vs, volumeID) } -func (vsb *gophercloudBroker) ConnectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) (volumeservice.VolumeConnection, error) { +func (vsb *gophercloudBroker) connectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) (volumeservice.VolumeConnection, error) { return volumeservice.ConnectCinderVolume(vs, volumeID) } -func (vsb *gophercloudBroker) DisconnectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { +func (vsb *gophercloudBroker) disconnectCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { return volumeservice.DisconnectCinderVolume(vs, volumeID) } -func (vsb *gophercloudBroker) UnreserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { +func (vsb *gophercloudBroker) unreserveCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { return volumeservice.UnreserveCinderVolume(vs, volumeID) } -func (vsb *gophercloudBroker) DeleteCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { +func (vsb *gophercloudBroker) deleteCinderVolume(vs *gophercloud.ServiceClient, volumeID string) error { return volumeservice.DeleteCinderVolume(vs, volumeID) } From 224ed8f03761457b8595dda44965eb11ba8a1084 Mon Sep 17 00:00:00 2001 From: Huamin Chen Date: Mon, 2 Oct 2017 20:39:03 +0000 Subject: [PATCH 20/68] don't use dash in volume snapshot Signed-off-by: 
Huamin Chen --- snapshot/examples/aws/snapshot.yaml | 2 +- snapshot/examples/gce/snapshot.yaml | 2 +- snapshot/examples/hostpath/README.md | 4 ++-- snapshot/examples/hostpath/snapshot.yaml | 2 +- snapshot/pkg/apis/crd/v1/register.go | 2 +- snapshot/pkg/apis/crd/v1/types.go | 8 ++++---- snapshot/test/hostpath_files/hostpath-snapshot.yaml | 2 +- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/snapshot/examples/aws/snapshot.yaml b/snapshot/examples/aws/snapshot.yaml index 95ab2b51670..8b68f5ec9fe 100644 --- a/snapshot/examples/aws/snapshot.yaml +++ b/snapshot/examples/aws/snapshot.yaml @@ -1,4 +1,4 @@ -apiVersion: volume-snapshot-data.external-storage.k8s.io/v1 +apiVersion: volumesnapshotdata.external-storage.k8s.io/v1 kind: VolumeSnapshot metadata: name: snapshot-demo diff --git a/snapshot/examples/gce/snapshot.yaml b/snapshot/examples/gce/snapshot.yaml index ae576e5a1d2..7a859936eb4 100644 --- a/snapshot/examples/gce/snapshot.yaml +++ b/snapshot/examples/gce/snapshot.yaml @@ -1,4 +1,4 @@ -apiVersion: volume-snapshot-data.external-storage.k8s.io/v1 +apiVersion: volumesnapshotdata.external-storage.k8s.io/v1 kind: VolumeSnapshot metadata: name: snapshot-1 diff --git a/snapshot/examples/hostpath/README.md b/snapshot/examples/hostpath/README.md index e53829e0cb9..fd01782f594 100644 --- a/snapshot/examples/hostpath/README.md +++ b/snapshot/examples/hostpath/README.md @@ -42,7 +42,7 @@ Now we have PVC bound to a PV that contains some data. We want to take snapshot kubectl get volumesnapshot,volumesnapshotdata -o yaml apiVersion: v1 items: - - apiVersion: volume-snapshot-data.external-storage.k8s.io/v1 + - apiVersion: volumesnapshotdata.external-storage.k8s.io/v1 kind: VolumeSnapshot metadata: labels: @@ -58,7 +58,7 @@ Now we have PVC bound to a PV that contains some data. We want to take snapshot message: Snapshot created successfully status: "True" type: Ready - - apiVersion: volume-snapshot-data.external-storage.k8s.io/v1 + - apiVersion: volumesnapshotdata.external-storage.k8s.io/v1 kind: VolumeSnapshotData metadata: name: k8s-volume-snapshot-d2209d5a-97a9-11e7-b963-5254000ac840 diff --git a/snapshot/examples/hostpath/snapshot.yaml b/snapshot/examples/hostpath/snapshot.yaml index bd838d06c7a..728406d53a3 100644 --- a/snapshot/examples/hostpath/snapshot.yaml +++ b/snapshot/examples/hostpath/snapshot.yaml @@ -1,4 +1,4 @@ -apiVersion: volume-snapshot-data.external-storage.k8s.io/v1 +apiVersion: volumesnapshotdata.external-storage.k8s.io/v1 kind: VolumeSnapshot metadata: name: snapshot-demo diff --git a/snapshot/pkg/apis/crd/v1/register.go b/snapshot/pkg/apis/crd/v1/register.go index e42cdf384bb..b768861dffe 100644 --- a/snapshot/pkg/apis/crd/v1/register.go +++ b/snapshot/pkg/apis/crd/v1/register.go @@ -23,7 +23,7 @@ import ( ) // GroupName is the group name use in this package. 
-const GroupName = "volume-snapshot-data.external-storage.k8s.io" +const GroupName = "volumesnapshotdata.external-storage.k8s.io" var ( // SchemeBuilder is the new scheme builder diff --git a/snapshot/pkg/apis/crd/v1/types.go b/snapshot/pkg/apis/crd/v1/types.go index a0cded1e471..dd06a575172 100644 --- a/snapshot/pkg/apis/crd/v1/types.go +++ b/snapshot/pkg/apis/crd/v1/types.go @@ -27,12 +27,12 @@ import ( const ( // VolumeSnapshotDataResourcePlural is "volumesnapshotdatas" VolumeSnapshotDataResourcePlural = "volumesnapshotdatas" - // VolumeSnapshotDataResource is "volume-snapshot-data" - VolumeSnapshotDataResource = "volume-snapshot-data" + // VolumeSnapshotDataResource is "volumesnapshotdata" + VolumeSnapshotDataResource = "volumesnapshotdata" // VolumeSnapshotResourcePlural is "volumesnapshots" VolumeSnapshotResourcePlural = "volumesnapshots" - // VolumeSnapshotResource is "volume-snapshot" - VolumeSnapshotResource = "volume-snapshot" + // VolumeSnapshotResource is "volumesnapshot" + VolumeSnapshotResource = "volumesnapshot" ) // VolumeSnapshotStatus is the status of the VolumeSnapshot diff --git a/snapshot/test/hostpath_files/hostpath-snapshot.yaml b/snapshot/test/hostpath_files/hostpath-snapshot.yaml index 73a15e55fd2..be042714937 100644 --- a/snapshot/test/hostpath_files/hostpath-snapshot.yaml +++ b/snapshot/test/hostpath_files/hostpath-snapshot.yaml @@ -1,4 +1,4 @@ -apiVersion: volume-snapshot-data.external-storage.k8s.io/v1 +apiVersion: volumesnapshotdata.external-storage.k8s.io/v1 kind: VolumeSnapshot metadata: name: hostpath-test-snapshot From c5ca116315c44c78afa3a286b2ed780b747f6b71 Mon Sep 17 00:00:00 2001 From: Huamin Chen Date: Mon, 2 Oct 2017 21:06:02 +0000 Subject: [PATCH 21/68] switch to volumesnapshot in api version Signed-off-by: Huamin Chen --- snapshot/examples/aws/snapshot.yaml | 2 +- snapshot/examples/gce/snapshot.yaml | 2 +- snapshot/examples/hostpath/README.md | 4 ++-- snapshot/examples/hostpath/snapshot.yaml | 2 +- snapshot/pkg/apis/crd/v1/register.go | 2 +- snapshot/pkg/apis/crd/v1/types.go | 4 ---- snapshot/test/hostpath_files/hostpath-snapshot.yaml | 2 +- 7 files changed, 7 insertions(+), 11 deletions(-) diff --git a/snapshot/examples/aws/snapshot.yaml b/snapshot/examples/aws/snapshot.yaml index 8b68f5ec9fe..c382966fe50 100644 --- a/snapshot/examples/aws/snapshot.yaml +++ b/snapshot/examples/aws/snapshot.yaml @@ -1,4 +1,4 @@ -apiVersion: volumesnapshotdata.external-storage.k8s.io/v1 +apiVersion: volumesnapshot.external-storage.k8s.io/v1 kind: VolumeSnapshot metadata: name: snapshot-demo diff --git a/snapshot/examples/gce/snapshot.yaml b/snapshot/examples/gce/snapshot.yaml index 7a859936eb4..13c09f5e0a3 100644 --- a/snapshot/examples/gce/snapshot.yaml +++ b/snapshot/examples/gce/snapshot.yaml @@ -1,4 +1,4 @@ -apiVersion: volumesnapshotdata.external-storage.k8s.io/v1 +apiVersion: volumesnapshot.external-storage.k8s.io/v1 kind: VolumeSnapshot metadata: name: snapshot-1 diff --git a/snapshot/examples/hostpath/README.md b/snapshot/examples/hostpath/README.md index fd01782f594..27dc541d136 100644 --- a/snapshot/examples/hostpath/README.md +++ b/snapshot/examples/hostpath/README.md @@ -42,7 +42,7 @@ Now we have PVC bound to a PV that contains some data. 
We want to take snapshot kubectl get volumesnapshot,volumesnapshotdata -o yaml apiVersion: v1 items: - - apiVersion: volumesnapshotdata.external-storage.k8s.io/v1 + - apiVersion: volumesnapshot.external-storage.k8s.io/v1 kind: VolumeSnapshot metadata: labels: @@ -58,7 +58,7 @@ Now we have PVC bound to a PV that contains some data. We want to take snapshot message: Snapshot created successfully status: "True" type: Ready - - apiVersion: volumesnapshotdata.external-storage.k8s.io/v1 + - apiVersion: volumesnapshot.external-storage.k8s.io/v1 kind: VolumeSnapshotData metadata: name: k8s-volume-snapshot-d2209d5a-97a9-11e7-b963-5254000ac840 diff --git a/snapshot/examples/hostpath/snapshot.yaml b/snapshot/examples/hostpath/snapshot.yaml index 728406d53a3..781b1210f14 100644 --- a/snapshot/examples/hostpath/snapshot.yaml +++ b/snapshot/examples/hostpath/snapshot.yaml @@ -1,4 +1,4 @@ -apiVersion: volumesnapshotdata.external-storage.k8s.io/v1 +apiVersion: volumesnapshot.external-storage.k8s.io/v1 kind: VolumeSnapshot metadata: name: snapshot-demo diff --git a/snapshot/pkg/apis/crd/v1/register.go b/snapshot/pkg/apis/crd/v1/register.go index b768861dffe..7f9538e2835 100644 --- a/snapshot/pkg/apis/crd/v1/register.go +++ b/snapshot/pkg/apis/crd/v1/register.go @@ -23,7 +23,7 @@ import ( ) // GroupName is the group name use in this package. -const GroupName = "volumesnapshotdata.external-storage.k8s.io" +const GroupName = "volumesnapshot.external-storage.k8s.io" var ( // SchemeBuilder is the new scheme builder diff --git a/snapshot/pkg/apis/crd/v1/types.go b/snapshot/pkg/apis/crd/v1/types.go index dd06a575172..5d96282c010 100644 --- a/snapshot/pkg/apis/crd/v1/types.go +++ b/snapshot/pkg/apis/crd/v1/types.go @@ -27,12 +27,8 @@ import ( const ( // VolumeSnapshotDataResourcePlural is "volumesnapshotdatas" VolumeSnapshotDataResourcePlural = "volumesnapshotdatas" - // VolumeSnapshotDataResource is "volumesnapshotdata" - VolumeSnapshotDataResource = "volumesnapshotdata" // VolumeSnapshotResourcePlural is "volumesnapshots" VolumeSnapshotResourcePlural = "volumesnapshots" - // VolumeSnapshotResource is "volumesnapshot" - VolumeSnapshotResource = "volumesnapshot" ) // VolumeSnapshotStatus is the status of the VolumeSnapshot diff --git a/snapshot/test/hostpath_files/hostpath-snapshot.yaml b/snapshot/test/hostpath_files/hostpath-snapshot.yaml index be042714937..ba00e1a788b 100644 --- a/snapshot/test/hostpath_files/hostpath-snapshot.yaml +++ b/snapshot/test/hostpath_files/hostpath-snapshot.yaml @@ -1,4 +1,4 @@ -apiVersion: volumesnapshotdata.external-storage.k8s.io/v1 +apiVersion: volumesnapshot.external-storage.k8s.io/v1 kind: VolumeSnapshot metadata: name: hostpath-test-snapshot From 0cb5f31340dd500ffccad05b3cec63cb469ecf44 Mon Sep 17 00:00:00 2001 From: Bo Thompson Date: Mon, 2 Oct 2017 17:03:30 -0700 Subject: [PATCH 22/68] nfs-provisioner - added maxExports flag The `maxExports` flag controls the maximum number of volumes a provisioner is allowed to export. Once this limit is reached new claims will be ignored with the hope that another provisioner will provision it. Omitting this flag (or setting to a negative value) will disable this feature, effectively allowing for unlimited exports. 
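For reference, the export-limit behavior described above reduces to the small check below. This is a self-contained sketch with illustrative names (`canExport`, `currentExports`), not the provisioner's actual types; the real logic is the `CanExport` method added by this patch.

```go
package main

import "fmt"

// canExport mirrors the semantics of the max-exports flag: a negative limit
// disables the feature (unlimited exports); otherwise a new export is allowed
// only while the current count stays below the limit.
func canExport(currentExports, maxExports int) bool {
	if maxExports < 0 {
		return true // flag omitted or negative: no cap
	}
	return currentExports < maxExports
}

func main() {
	fmt.Println(canExport(5, -1)) // true: unlimited
	fmt.Println(canExport(2, 3))  // true: below the cap
	fmt.Println(canExport(3, 3))  // false: new claims would be ignored
}
```

When the check fails, the patch returns a `controller.IgnoredError` from `createVolume`, so the claim is skipped rather than failed, leaving it free for another provisioner to pick up.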
--- nfs/cmd/nfs-provisioner/main.go | 3 +- nfs/pkg/volume/export.go | 26 +++++-- nfs/pkg/volume/provision.go | 18 ++++- nfs/pkg/volume/provision_test.go | 125 ++++++++++++++++++++++++++++--- 4 files changed, 154 insertions(+), 18 deletions(-) diff --git a/nfs/cmd/nfs-provisioner/main.go b/nfs/cmd/nfs-provisioner/main.go index d19ee7369a1..6f8485ec28b 100644 --- a/nfs/cmd/nfs-provisioner/main.go +++ b/nfs/cmd/nfs-provisioner/main.go @@ -42,6 +42,7 @@ var ( gracePeriod = flag.Uint("grace-period", 90, "NFS Ganesha grace period to use in seconds, from 0-180. If the server is not expected to survive restarts, i.e. it is running as a pod & its export directory is not persisted, this can be set to 0. Can only be set if both run-server and use-ganesha are true. Default 90.") enableXfsQuota = flag.Bool("enable-xfs-quota", false, "If the provisioner will set xfs quotas for each volume it provisions. Requires that the directory it creates volumes in ('/export') is xfs mounted with option prjquota/pquota, and that it has the privilege to run xfs_quota. Default false.") serverHostname = flag.String("server-hostname", "", "The hostname for the NFS server to export from. Only applicable when running out-of-cluster i.e. it can only be set if either master or kubeconfig are set. If unset, the first IP output by `hostname -i` is used.") + maxExports = flag.Int("max-exports", -1, "The maximum number of volumes to be exported by this provisioner. New claims will be ignored once this limit has been reached. A negative value is interpreted as 'unlimited'. Default -1.") ) const ( @@ -123,7 +124,7 @@ func main() { // Create the provisioner: it implements the Provisioner interface expected by // the controller - nfsProvisioner := vol.NewNFSProvisioner(exportDir, clientset, outOfCluster, *useGanesha, ganeshaConfig, *enableXfsQuota, *serverHostname) + nfsProvisioner := vol.NewNFSProvisioner(exportDir, clientset, outOfCluster, *useGanesha, ganeshaConfig, *enableXfsQuota, *serverHostname, *maxExports) // Start the provision controller which will dynamically provision NFS PVs pc := controller.NewProvisionController( diff --git a/nfs/pkg/volume/export.go b/nfs/pkg/volume/export.go index e8636fc030a..c22aef04ef0 100644 --- a/nfs/pkg/volume/export.go +++ b/nfs/pkg/volume/export.go @@ -30,6 +30,7 @@ import ( ) type exporter interface { + CanExport(int) bool AddExportBlock(string, bool) (string, uint16, error) RemoveExportBlock(string, uint16) error Export(string) error @@ -40,14 +41,27 @@ type exportBlockCreator interface { CreateExportBlock(string, string, bool) string } -type genericExporter struct { - ebc exportBlockCreator - config string - +type exportMap struct { // Map to track used exportIDs. Each ganesha export needs a unique fsid and // Export_Id, each kernel a unique fsid. Assign each export an exportID and // use it as both fsid and Export_Id. 
exportIDs map[uint16]bool +} + +func (e *exportMap) CanExport(limit int) bool { + if limit < 0 { + return true + } + + totalExports := len(e.exportIDs) + return totalExports < limit +} + +type genericExporter struct { + *exportMap + + ebc exportBlockCreator + config string mapMutex *sync.Mutex fileMutex *sync.Mutex @@ -63,9 +77,11 @@ func newGenericExporter(ebc exportBlockCreator, config string, re *regexp.Regexp glog.Errorf("error while populating exportIDs map, there may be errors exporting later if exportIDs are reused: %v", err) } return &genericExporter{ + exportMap: &exportMap{ + exportIDs: exportIDs, + }, ebc: ebc, config: config, - exportIDs: exportIDs, mapMutex: &sync.Mutex{}, fileMutex: &sync.Mutex{}, } diff --git a/nfs/pkg/volume/provision.go b/nfs/pkg/volume/provision.go index 180080aeb59..365fd35d07e 100644 --- a/nfs/pkg/volume/provision.go +++ b/nfs/pkg/volume/provision.go @@ -78,7 +78,7 @@ const ( // NewNFSProvisioner creates a Provisioner that provisions NFS PVs backed by // the given directory. -func NewNFSProvisioner(exportDir string, client kubernetes.Interface, outOfCluster bool, useGanesha bool, ganeshaConfig string, enableXfsQuota bool, serverHostname string) controller.Provisioner { +func NewNFSProvisioner(exportDir string, client kubernetes.Interface, outOfCluster bool, useGanesha bool, ganeshaConfig string, enableXfsQuota bool, serverHostname string, maxExports int) controller.Provisioner { var exp exporter if useGanesha { exp = newGaneshaExporter(ganeshaConfig) @@ -95,10 +95,10 @@ func NewNFSProvisioner(exportDir string, client kubernetes.Interface, outOfClust } else { quotaer = newDummyQuotaer() } - return newNFSProvisionerInternal(exportDir, client, outOfCluster, exp, quotaer, serverHostname) + return newNFSProvisionerInternal(exportDir, client, outOfCluster, exp, quotaer, serverHostname, maxExports) } -func newNFSProvisionerInternal(exportDir string, client kubernetes.Interface, outOfCluster bool, exporter exporter, quotaer quotaer, serverHostname string) *nfsProvisioner { +func newNFSProvisionerInternal(exportDir string, client kubernetes.Interface, outOfCluster bool, exporter exporter, quotaer quotaer, serverHostname string, maxExports int) *nfsProvisioner { if _, err := os.Stat(exportDir); os.IsNotExist(err) { glog.Fatalf("exportDir %s does not exist!", exportDir) } @@ -126,6 +126,7 @@ func newNFSProvisionerInternal(exportDir string, client kubernetes.Interface, ou exporter: exporter, quotaer: quotaer, serverHostname: serverHostname, + maxExports: maxExports, identity: identity, podIPEnv: podIPEnv, serviceEnv: serviceEnv, @@ -158,6 +159,9 @@ type nfsProvisioner struct { // running as a Docker container serverHostname string + // The maximum number of volumes to be exported by the provisioner + maxExports int + // Identity of this nfsProvisioner, generated & persisted to exportDir or // recovered from there. 
Used to mark provisioned PVs identity types.UID @@ -247,6 +251,10 @@ func (p *nfsProvisioner) createVolume(options controller.VolumeOptions) (volume, return volume{}, fmt.Errorf("error getting NFS server IP for volume: %v", err) } + if ok := p.checkExportLimit(); !ok { + return volume{}, &controller.IgnoredError{Reason: fmt.Sprintf("export limit of %v has been reached", p.maxExports)} + } + path := path.Join(p.exportDir, options.PVName) err = p.createDirectory(options.PVName, gid) @@ -414,6 +422,10 @@ func (p *nfsProvisioner) getServer() (string, error) { return service.Spec.ClusterIP, nil } +func (p *nfsProvisioner) checkExportLimit() bool { + return p.exporter.CanExport(p.maxExports) +} + // createDirectory creates the given directory in exportDir with appropriate // permissions and ownership according to the given gid parameter string. func (p *nfsProvisioner) createDirectory(directory, gid string) error { diff --git a/nfs/pkg/volume/provision_test.go b/nfs/pkg/volume/provision_test.go index 20e8ebb1360..00ca1d08354 100644 --- a/nfs/pkg/volume/provision_test.go +++ b/nfs/pkg/volume/provision_test.go @@ -18,7 +18,9 @@ package volume import ( "errors" + "fmt" "io/ioutil" + "math" "os" "reflect" "regexp" @@ -51,6 +53,7 @@ func TestCreateVolume(t *testing.T) { expectedBlock string expectedExportID uint16 expectError bool + expectIgnored bool }{ { name: "succeed creating volume", @@ -64,8 +67,8 @@ func TestCreateVolume(t *testing.T) { expectedServer: "1.1.1.1", expectedPath: tmpDir + "/pvc-1", expectedGroup: 0, - expectedBlock: "\nExport_Id = 0;\n", - expectedExportID: 0, + expectedBlock: "\nExport_Id = 1;\n", + expectedExportID: 1, expectError: false, }, { @@ -80,8 +83,8 @@ func TestCreateVolume(t *testing.T) { expectedServer: "1.1.1.1", expectedPath: tmpDir + "/pvc-2", expectedGroup: 0, - expectedBlock: "\nExport_Id = 0;\n", - expectedExportID: 0, + expectedBlock: "\nExport_Id = 2;\n", + expectedExportID: 2, expectError: false, }, { @@ -99,6 +102,7 @@ func TestCreateVolume(t *testing.T) { expectedBlock: "", expectedExportID: 0, expectError: true, + expectIgnored: false, }, { name: "bad server", @@ -115,6 +119,7 @@ func TestCreateVolume(t *testing.T) { expectedBlock: "", expectedExportID: 0, expectError: true, + expectIgnored: false, }, { name: "dir already exists", @@ -131,6 +136,7 @@ func TestCreateVolume(t *testing.T) { expectedBlock: "", expectedExportID: 0, expectError: true, + expectIgnored: false, }, { name: "error exporting", @@ -148,6 +154,39 @@ func TestCreateVolume(t *testing.T) { expectedExportID: 0, expectError: true, }, + { + name: "succeed creating volume last slot", + options: controller.VolumeOptions{ + PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete, + PVName: "pvc-3", + PVC: newClaim(resource.MustParse("1Ki"), []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany}, nil), + Parameters: map[string]string{}, + }, + envKey: podIPEnv, + expectedServer: "1.1.1.1", + expectedPath: tmpDir + "/pvc-3", + expectedGroup: 0, + expectedBlock: "\nExport_Id = 3;\n", + expectedExportID: 3, + expectError: false, + }, + { + name: "max export limit exceeded", + options: controller.VolumeOptions{ + PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete, + PVName: "pvc-3", + PVC: newClaim(resource.MustParse("1Ki"), []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany}, nil), + Parameters: map[string]string{}, + }, + envKey: podIPEnv, + expectedServer: "", + expectedPath: "", + expectedGroup: 0, + expectedBlock: "", + expectedExportID: 0, + 
expectError: true, + expectIgnored: true, + }, } client := fake.NewSimpleClientset() @@ -156,12 +195,20 @@ func TestCreateVolume(t *testing.T) { if err != nil { t.Errorf("Error creating file %s: %v", conf, err) } - p := newNFSProvisionerInternal(tmpDir+"/", client, false, &testExporter{config: conf}, newDummyQuotaer(), "") + exporter := &testExporter{ + exportMap: &exportMap{exportIDs: map[uint16]bool{}}, + config: conf, + } + maxExports := 3 + p := newNFSProvisionerInternal(tmpDir+"/", client, false, exporter, newDummyQuotaer(), "", maxExports) for _, test := range tests { os.Setenv(test.envKey, "1.1.1.1") volume, err := p.createVolume(test.options) + if err == nil { + p.exporter.(*testExporter).exportIDs[volume.exportID] = true + } evaluate(t, test.name, test.expectError, err, test.expectedServer, volume.server, "server") evaluate(t, test.name, test.expectError, err, test.expectedPath, volume.path, "path") @@ -169,6 +216,9 @@ func TestCreateVolume(t *testing.T) { evaluate(t, test.name, test.expectError, err, test.expectedBlock, volume.exportBlock, "block") evaluate(t, test.name, test.expectError, err, test.expectedExportID, volume.exportID, "export id") + _, isIgnored := err.(*controller.IgnoredError) + evaluate(t, test.name, test.expectError, err, test.expectIgnored, isIgnored, "ignored error") + os.Unsetenv(test.envKey) } } @@ -294,7 +344,7 @@ func TestValidateOptions(t *testing.T) { } client := fake.NewSimpleClientset() - p := newNFSProvisionerInternal(tmpDir+"/", client, false, &testExporter{}, newDummyQuotaer(), "") + p := newNFSProvisionerInternal(tmpDir+"/", client, false, &testExporter{}, newDummyQuotaer(), "", -1) for _, test := range tests { gid, rootSquash, _, err := p.validateOptions(test.options) @@ -304,6 +354,48 @@ func TestValidateOptions(t *testing.T) { } } +func TestCheckExportLimit(t *testing.T) { + tmpDir := utiltesting.MkTmpdirOrDie("nfsProvisionTest") + defer os.RemoveAll(tmpDir) + + tests := []struct { + name string + configContents string + exportIDs map[uint16]bool + maxExports int + expectedResult bool + expectError bool + }{ + { + name: "unlimited exports", + exportIDs: map[uint16]bool{1: true, 3: true}, + maxExports: -1, + expectedResult: true, + expectError: false, + }, + { + name: "max export limit reached", + exportIDs: map[uint16]bool{1: true, 3: true}, + maxExports: 2, + expectedResult: false, + expectError: false, + }, + { + name: "max export limit not reached", + exportIDs: map[uint16]bool{1: true}, + maxExports: 2, + expectedResult: true, + expectError: false, + }, + } + for _, test := range tests { + client := fake.NewSimpleClientset() + p := newNFSProvisionerInternal(tmpDir+"/", client, false, &testExporter{exportMap: &exportMap{exportIDs: test.exportIDs}}, newDummyQuotaer(), "", test.maxExports) + ok := p.checkExportLimit() + evaluate(t, test.name, test.expectError, nil, test.expectedResult, ok, "checkExportLimit") + } +} + func TestCreateDirectory(t *testing.T) { tmpDir := utiltesting.MkTmpdirOrDie("nfsProvisionTest") defer os.RemoveAll(tmpDir) @@ -354,7 +446,7 @@ func TestCreateDirectory(t *testing.T) { } client := fake.NewSimpleClientset() - p := newNFSProvisionerInternal(tmpDir+"/", client, false, &testExporter{}, newDummyQuotaer(), "") + p := newNFSProvisionerInternal(tmpDir+"/", client, false, &testExporter{}, newDummyQuotaer(), "", -1) for _, test := range tests { path := p.exportDir + test.directory @@ -635,7 +727,7 @@ func TestGetServer(t *testing.T) { } client := fake.NewSimpleClientset(test.objs...) 
- p := newNFSProvisionerInternal(tmpDir+"/", client, test.outOfCluster, &testExporter{}, newDummyQuotaer(), test.serverHostname) + p := newNFSProvisionerInternal(tmpDir+"/", client, test.outOfCluster, &testExporter{}, newDummyQuotaer(), test.serverHostname, -1) server, err := p.getServer() @@ -707,13 +799,28 @@ func newEndpoints(name string, ips []string, ports []endpointPort) *v1.Endpoints } type testExporter struct { + *exportMap + config string } var _ exporter = &testExporter{} +func (e *testExporter) CanExport(limit int) bool { + if e.exportMap != nil { + return e.exportMap.CanExport(limit) + } + return true +} + func (e *testExporter) AddExportBlock(path string, _ bool) (string, uint16, error) { - return "\nExport_Id = 0;\n", 0, nil + id := uint16(1) + for ; id <= math.MaxUint16; id++ { + if _, ok := e.exportIDs[id]; !ok { + break + } + } + return fmt.Sprintf("\nExport_Id = %d;\n", id), id, nil } func (e *testExporter) RemoveExportBlock(block string, exportID uint16) error { From c76e5abad641d70ca8b99abb7cc3eff7a92149ef Mon Sep 17 00:00:00 2001 From: Tomas Smetana Date: Tue, 19 Sep 2017 14:18:29 +0200 Subject: [PATCH 23/68] Snapshot: add user guide and move documentation to a common directory --- snapshot/{ => doc}/examples/aws/README.md | 0 snapshot/{ => doc}/examples/aws/claim.yaml | 0 snapshot/{ => doc}/examples/aws/class.yaml | 0 snapshot/{ => doc}/examples/aws/pod.yaml | 0 snapshot/{ => doc}/examples/aws/pvc.yaml | 0 snapshot/{ => doc}/examples/aws/snapshot.yaml | 0 snapshot/{ => doc}/examples/gce/README.md | 0 snapshot/{ => doc}/examples/gce/claim.yaml | 0 snapshot/{ => doc}/examples/gce/class.yaml | 0 .../examples/gce/fromsnapshotclaim.yaml | 0 .../{ => doc}/examples/gce/provision.yaml | 0 snapshot/{ => doc}/examples/gce/pv.yaml | 0 snapshot/{ => doc}/examples/gce/pvc.yaml | 0 snapshot/{ => doc}/examples/gce/snapshot.yaml | 0 .../{ => doc}/examples/hostpath/README.md | 0 .../{ => doc}/examples/hostpath/class.yaml | 0 snapshot/{ => doc}/examples/hostpath/pv.yaml | 0 snapshot/{ => doc}/examples/hostpath/pvc.yaml | 0 .../{ => doc}/examples/hostpath/snapshot.yaml | 0 snapshot/doc/user-guide.md | 112 ++++++++++++++++++ .../{ => doc}/volume-snapshotting-proposal.md | 0 21 files changed, 112 insertions(+) rename snapshot/{ => doc}/examples/aws/README.md (100%) rename snapshot/{ => doc}/examples/aws/claim.yaml (100%) rename snapshot/{ => doc}/examples/aws/class.yaml (100%) rename snapshot/{ => doc}/examples/aws/pod.yaml (100%) rename snapshot/{ => doc}/examples/aws/pvc.yaml (100%) rename snapshot/{ => doc}/examples/aws/snapshot.yaml (100%) rename snapshot/{ => doc}/examples/gce/README.md (100%) rename snapshot/{ => doc}/examples/gce/claim.yaml (100%) rename snapshot/{ => doc}/examples/gce/class.yaml (100%) rename snapshot/{ => doc}/examples/gce/fromsnapshotclaim.yaml (100%) rename snapshot/{ => doc}/examples/gce/provision.yaml (100%) rename snapshot/{ => doc}/examples/gce/pv.yaml (100%) rename snapshot/{ => doc}/examples/gce/pvc.yaml (100%) rename snapshot/{ => doc}/examples/gce/snapshot.yaml (100%) rename snapshot/{ => doc}/examples/hostpath/README.md (100%) rename snapshot/{ => doc}/examples/hostpath/class.yaml (100%) rename snapshot/{ => doc}/examples/hostpath/pv.yaml (100%) rename snapshot/{ => doc}/examples/hostpath/pvc.yaml (100%) rename snapshot/{ => doc}/examples/hostpath/snapshot.yaml (100%) create mode 100644 snapshot/doc/user-guide.md rename snapshot/{ => doc}/volume-snapshotting-proposal.md (100%) diff --git a/snapshot/examples/aws/README.md 
b/snapshot/doc/examples/aws/README.md similarity index 100% rename from snapshot/examples/aws/README.md rename to snapshot/doc/examples/aws/README.md diff --git a/snapshot/examples/aws/claim.yaml b/snapshot/doc/examples/aws/claim.yaml similarity index 100% rename from snapshot/examples/aws/claim.yaml rename to snapshot/doc/examples/aws/claim.yaml diff --git a/snapshot/examples/aws/class.yaml b/snapshot/doc/examples/aws/class.yaml similarity index 100% rename from snapshot/examples/aws/class.yaml rename to snapshot/doc/examples/aws/class.yaml diff --git a/snapshot/examples/aws/pod.yaml b/snapshot/doc/examples/aws/pod.yaml similarity index 100% rename from snapshot/examples/aws/pod.yaml rename to snapshot/doc/examples/aws/pod.yaml diff --git a/snapshot/examples/aws/pvc.yaml b/snapshot/doc/examples/aws/pvc.yaml similarity index 100% rename from snapshot/examples/aws/pvc.yaml rename to snapshot/doc/examples/aws/pvc.yaml diff --git a/snapshot/examples/aws/snapshot.yaml b/snapshot/doc/examples/aws/snapshot.yaml similarity index 100% rename from snapshot/examples/aws/snapshot.yaml rename to snapshot/doc/examples/aws/snapshot.yaml diff --git a/snapshot/examples/gce/README.md b/snapshot/doc/examples/gce/README.md similarity index 100% rename from snapshot/examples/gce/README.md rename to snapshot/doc/examples/gce/README.md diff --git a/snapshot/examples/gce/claim.yaml b/snapshot/doc/examples/gce/claim.yaml similarity index 100% rename from snapshot/examples/gce/claim.yaml rename to snapshot/doc/examples/gce/claim.yaml diff --git a/snapshot/examples/gce/class.yaml b/snapshot/doc/examples/gce/class.yaml similarity index 100% rename from snapshot/examples/gce/class.yaml rename to snapshot/doc/examples/gce/class.yaml diff --git a/snapshot/examples/gce/fromsnapshotclaim.yaml b/snapshot/doc/examples/gce/fromsnapshotclaim.yaml similarity index 100% rename from snapshot/examples/gce/fromsnapshotclaim.yaml rename to snapshot/doc/examples/gce/fromsnapshotclaim.yaml diff --git a/snapshot/examples/gce/provision.yaml b/snapshot/doc/examples/gce/provision.yaml similarity index 100% rename from snapshot/examples/gce/provision.yaml rename to snapshot/doc/examples/gce/provision.yaml diff --git a/snapshot/examples/gce/pv.yaml b/snapshot/doc/examples/gce/pv.yaml similarity index 100% rename from snapshot/examples/gce/pv.yaml rename to snapshot/doc/examples/gce/pv.yaml diff --git a/snapshot/examples/gce/pvc.yaml b/snapshot/doc/examples/gce/pvc.yaml similarity index 100% rename from snapshot/examples/gce/pvc.yaml rename to snapshot/doc/examples/gce/pvc.yaml diff --git a/snapshot/examples/gce/snapshot.yaml b/snapshot/doc/examples/gce/snapshot.yaml similarity index 100% rename from snapshot/examples/gce/snapshot.yaml rename to snapshot/doc/examples/gce/snapshot.yaml diff --git a/snapshot/examples/hostpath/README.md b/snapshot/doc/examples/hostpath/README.md similarity index 100% rename from snapshot/examples/hostpath/README.md rename to snapshot/doc/examples/hostpath/README.md diff --git a/snapshot/examples/hostpath/class.yaml b/snapshot/doc/examples/hostpath/class.yaml similarity index 100% rename from snapshot/examples/hostpath/class.yaml rename to snapshot/doc/examples/hostpath/class.yaml diff --git a/snapshot/examples/hostpath/pv.yaml b/snapshot/doc/examples/hostpath/pv.yaml similarity index 100% rename from snapshot/examples/hostpath/pv.yaml rename to snapshot/doc/examples/hostpath/pv.yaml diff --git a/snapshot/examples/hostpath/pvc.yaml b/snapshot/doc/examples/hostpath/pvc.yaml similarity index 100% rename from 
snapshot/examples/hostpath/pvc.yaml rename to snapshot/doc/examples/hostpath/pvc.yaml diff --git a/snapshot/examples/hostpath/snapshot.yaml b/snapshot/doc/examples/hostpath/snapshot.yaml similarity index 100% rename from snapshot/examples/hostpath/snapshot.yaml rename to snapshot/doc/examples/hostpath/snapshot.yaml diff --git a/snapshot/doc/user-guide.md b/snapshot/doc/user-guide.md new file mode 100644 index 00000000000..93a958d6421 --- /dev/null +++ b/snapshot/doc/user-guide.md @@ -0,0 +1,112 @@ +Volume Snapshots in Kubernetes +========================================= + +This document describes current state of Volume Snapshot support in Kubernetes provided by external controller and provisioner. Familiarity with [Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/), [Persistent Volume Claims](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) and [Dynamic Provisioning](http://blog.kubernetes.io/2016/10/dynamic-provisioning-and-storage-in-kubernetes.html) is recommended. + +# Introduction + +Many storage systems provide the ability to create "snapshots" of a persistent volume to protect against data loss. The external snapshot controller and provisioner provide means to use the feature in Kubernetes cluster and handle the volume snapshots through Kubernetes API. + +# Features + +* Create snapshot of a `PersistentVolume` bound to a `PersistentVolumeClaim` +* List existing `VolumeSnapshots` +* Delete existing `VolumeSnapshot` +* Create a new `PersistentVolume` from an existing `VolumeSnapshot` +* Supported `PersistentVolume` [types](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#types-of-persistent-volumes): + * Amazon EBS + * GCE PD + * HostPath + +# Lifecycle of a Volume Snapshot and Volume Snapshot Data + +## Prerequisites +Prerequisites for using the snapshotting features are described in sections "Persistent Volume Claim and Persistent Volume" and "Snapshot Promoter". + +### Persistent Volume Claim and Persistent Volume +The user already created a Persistent Volume Claim that is bound to a Persistent Volume. The Persistent Volume type must be one of the snaphot supported Persistent Volume types. + +### Snapshot Promoter +An admin created a Storage Class like the one shown below: +```yaml +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: snapshot-promoter +provisioner: volumesnapshot.external-storage.k8s.io/snapshot-promoter +``` +Such Storage Class is necessary for restoring a Persistent Volume from already created Volume Snapshot and Volume Snapshot Data. + +## Creating +Each `VolumeSnapshot` contains a spec and status, which is the specification and status of the Volume Snapshot. +```yaml +apiVersion: volume-snapshot-data.external-storage.k8s.io/v1 +kind: VolumeSnapshot +metadata: + name: snapshot-demo +spec: + persistentVolumeClaimName: ebs-pvc +``` + +* `persistentVolumeClaimName`: name of the Persistent Volume Claim that is bound to a Persistent Volume. This particular Persistent Volume will be snapshotted. + +Volume Snapshot Data is automatically created based on the Volume Snapshot. Relationship between Volume Snapshot and Volume Snapshot Data is similar to the relationship between Persistent Volume Claim and Persistent Volume. + +Depending on the Persistent Volume type the operation might go through several phases which are reflected by the Volume Snapshot status: +1. The Volume Snapshot Data is created and is bound to the Volume Snapshot. +2. 
The controller starts the snapshot operation: the snapshotted Persistent Volume might need to be frozen and the applications paused. +3. The storage system finishes creating the snapshot (the snapshot is "cut") and the snapshotted Persistent Volume might return to normal operation. The snapshot itself is not ready yet. The last status condition is of `Created` type with status value "True" +4. The newly created snapshot is completed and ready to use. The last status condition is of `Ready` type with status value "True" + +A Volume Snapshot status can be displayed as shown below: +```yaml +$ kubectl get volumesnapshot -o yaml +apiVersion: volume-snapshot-data.external-storage.k8s.io/v1 + kind: VolumeSnapshot + metadata: + clusterName: "" + creationTimestamp: 2017-09-19T13:58:28Z + generation: 0 + labels: + Timestamp: "1505829508178510973" + name: snapshot-demo + namespace: default + resourceVersion: "780" + selfLink: /apis/volume-snapshot-data.external-storage.k8s.io/v1/namespaces/default/volumesnapshots/snapshot-demo + uid: 9cc5da57-9d42-11e7-9b25-90b11c132b3f + spec: + persistentVolumeClaimName: pvc-hostpath + snapshotDataName: k8s-volume-snapshot-9cc8813e-9d42-11e7-8bed-90b11c132b3f + status: + conditions: + - lastTransitionTime: null + message: Snapshot created successfully + reason: "" + status: "True" + type: Ready + creationTimestamp: null +``` + +## Restoring +In order to restore a Persistent Volume from a Volume Snapshot a user creates the following Persistent Volume Claim: +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: snapshot-pv-provisioning-demo + annotations: + snapshot.alpha.kubernetes.io/snapshot: snapshot-demo +spec: + storageClassName: snapshot-promoter +``` +* `annotations`: `snapshot.alpha.kubernetes.io/snapshot`: the name of the Volume Snapshot that will be restored. +* `storageClassName`: Storage Class created by admin for restoring Volume Snapshots. + +A Persistent Volume will be created and bound to the Persistent Volume Claim. The process may take several minutes depending on the Persistent Volume Type. + +## Deleting +A Volume Snapshot `snapshot-demo` can be deleted as shown below: +``` +$ kubectl delete -f volumesnapshot/snapshot-demo +``` +The Volume Snapshot Data that are bound to the Volume Snapshot are also automatically deleted. diff --git a/snapshot/volume-snapshotting-proposal.md b/snapshot/doc/volume-snapshotting-proposal.md similarity index 100% rename from snapshot/volume-snapshotting-proposal.md rename to snapshot/doc/volume-snapshotting-proposal.md From 7c4adcf0ea611504878dcb26eba586c443ada1a7 Mon Sep 17 00:00:00 2001 From: Tomas Smetana Date: Fri, 22 Sep 2017 13:09:26 +0200 Subject: [PATCH 24/68] Snapshot: Small fixes in the user guide --- snapshot/doc/user-guide.md | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/snapshot/doc/user-guide.md b/snapshot/doc/user-guide.md index 93a958d6421..4de707a0d52 100644 --- a/snapshot/doc/user-guide.md +++ b/snapshot/doc/user-guide.md @@ -17,6 +17,7 @@ Many storage systems provide the ability to create "snapshots" of a persistent v * Amazon EBS * GCE PD * HostPath + * OpenStack Cinder # Lifecycle of a Volume Snapshot and Volume Snapshot Data @@ -53,14 +54,22 @@ spec: Volume Snapshot Data is automatically created based on the Volume Snapshot. Relationship between Volume Snapshot and Volume Snapshot Data is similar to the relationship between Persistent Volume Claim and Persistent Volume. 
Depending on the Persistent Volume type the operation might go through several phases which are reflected by the Volume Snapshot status: -1. The Volume Snapshot Data is created and is bound to the Volume Snapshot. + +1. The new Volume Snapshot object is created. 2. The controller starts the snapshot operation: the snapshotted Persistent Volume might need to be frozen and the applications paused. -3. The storage system finishes creating the snapshot (the snapshot is "cut") and the snapshotted Persistent Volume might return to normal operation. The snapshot itself is not ready yet. The last status condition is of `Created` type with status value "True" +3. The storage system finishes creating the snapshot (the snapshot is "cut") and the snapshotted Persistent Volume might return to normal operation. The snapshot itself is not ready yet. The last status condition is of `Pending` type with status value "True". A new VolumeSnapshotData object is created to represent the actual snapshot. 4. The newly created snapshot is completed and ready to use. The last status condition is of `Ready` type with status value "True" +*Notes* + +* It is the user's responsibility to ensure the data consistency (stop the pod/application, flush caches, freeze the filesystem, ...). +* In case of error in any of the steps the Volume Snapshot status is appended with an `Error` condition. + A Volume Snapshot status can be displayed as shown below: -```yaml +```sh $ kubectl get volumesnapshot -o yaml +``` +```yaml apiVersion: volume-snapshot-data.external-storage.k8s.io/v1 kind: VolumeSnapshot metadata: From 289f3cd6f3312d4f16432ea9c8e0d42715967bd1 Mon Sep 17 00:00:00 2001 From: Tomas Smetana Date: Fri, 22 Sep 2017 18:06:50 +0200 Subject: [PATCH 25/68] Snapshot: more user guide fixed --- snapshot/doc/user-guide.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/snapshot/doc/user-guide.md b/snapshot/doc/user-guide.md index 4de707a0d52..62ad76be1d8 100644 --- a/snapshot/doc/user-guide.md +++ b/snapshot/doc/user-guide.md @@ -38,7 +38,7 @@ provisioner: volumesnapshot.external-storage.k8s.io/snapshot-promoter ``` Such Storage Class is necessary for restoring a Persistent Volume from already created Volume Snapshot and Volume Snapshot Data. -## Creating +## Creating Snapshot Each `VolumeSnapshot` contains a spec and status, which is the specification and status of the Volume Snapshot. ```yaml apiVersion: volume-snapshot-data.external-storage.k8s.io/v1 @@ -96,7 +96,7 @@ apiVersion: volume-snapshot-data.external-storage.k8s.io/v1 creationTimestamp: null ``` -## Restoring +## Restoring Snapshot In order to restore a Persistent Volume from a Volume Snapshot a user creates the following Persistent Volume Claim: ```yaml apiVersion: v1 @@ -113,7 +113,7 @@ spec: A Persistent Volume will be created and bound to the Persistent Volume Claim. The process may take several minutes depending on the Persistent Volume Type. 
-## Deleting +## Deleting Snapshot A Volume Snapshot `snapshot-demo` can be deleted as shown below: ``` $ kubectl delete -f volumesnapshot/snapshot-demo From bf9f89076bcaa5b714629a1a9550b6a3798b5f24 Mon Sep 17 00:00:00 2001 From: Tomas Smetana Date: Wed, 4 Oct 2017 12:49:53 +0200 Subject: [PATCH 26/68] Snapshot: Fix GCE example These are fixes proposed by @liangxia in the PR#391 --- snapshot/doc/examples/gce/README.md | 8 ++++---- snapshot/doc/examples/gce/fromsnapshotclaim.yaml | 14 -------------- snapshot/doc/examples/gce/snapshot.yaml | 2 +- 3 files changed, 5 insertions(+), 19 deletions(-) delete mode 100644 snapshot/doc/examples/gce/fromsnapshotclaim.yaml diff --git a/snapshot/doc/examples/gce/README.md b/snapshot/doc/examples/gce/README.md index c15df23b20a..5c02853e72f 100644 --- a/snapshot/doc/examples/gce/README.md +++ b/snapshot/doc/examples/gce/README.md @@ -30,9 +30,9 @@ kubectl get volumesnapshot,volumesnapshotdata -o yaml --namespace=myns ## Snapshot based PV Provisioner -Unlike exiting PV provisioners that provision blank volume, Snapshot based PV provisioners create volumes based on existing snapshots. Thus new provisioners are needed. +Unlike existing PV provisioners that provision blank volume, Snapshot based PV provisioners create volumes based on existing snapshots. Thus new provisioners are needed. -There is a special annotation give to PVCs that request snapshot based PVs. As illustrated in [the example](examples/hostpath/claim.yaml), `snapshot.alpha.kubernetes.io` must point to an existing VolumeSnapshot Object +There is a special annotation give to PVCs that request snapshot based PVs. As illustrated in [the example](./claim.yaml), `snapshot.alpha.kubernetes.io` must point to an existing VolumeSnapshot Object ```yaml metadata: name: @@ -41,7 +41,7 @@ metadata: snapshot.alpha.kubernetes.io/snapshot: snapshot-demo ``` -## HostPath Volume Type +## GCE PD Volume Type #### Start PV Provisioner and Storage Class to restore a snapshot to a PV @@ -58,7 +58,7 @@ kubectl create -f examples/gce/provision.yaml ### Create a PVC that claims a PV based on an existing snapshot ```bash -kubectl create -f examples/gce/fromsnapshotclaim.yaml +kubectl create -f examples/gce/claim.yaml ``` #### Check PV and PVC are created diff --git a/snapshot/doc/examples/gce/fromsnapshotclaim.yaml b/snapshot/doc/examples/gce/fromsnapshotclaim.yaml deleted file mode 100644 index 126f91f299e..00000000000 --- a/snapshot/doc/examples/gce/fromsnapshotclaim.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: snapshot-pv-provisioning-demo - namespace: myns - annotations: - snapshot.alpha.kubernetes.io/snapshot: snapshot-1 -spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 8Gi - storageClassName: snapshot-promoter - diff --git a/snapshot/doc/examples/gce/snapshot.yaml b/snapshot/doc/examples/gce/snapshot.yaml index 13c09f5e0a3..91353b37355 100644 --- a/snapshot/doc/examples/gce/snapshot.yaml +++ b/snapshot/doc/examples/gce/snapshot.yaml @@ -1,7 +1,7 @@ apiVersion: volumesnapshot.external-storage.k8s.io/v1 kind: VolumeSnapshot metadata: - name: snapshot-1 + name: snapshot-demo namespace: myns spec: persistentVolumeClaimName: gce-pvc From fcdb61cb54b2fe011622d1a3a5807250e087d647 Mon Sep 17 00:00:00 2001 From: Tomas Smetana Date: Wed, 4 Oct 2017 16:15:03 +0200 Subject: [PATCH 27/68] Snapshot: update api versions in user guide --- snapshot/doc/user-guide.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/snapshot/doc/user-guide.md b/snapshot/doc/user-guide.md index 62ad76be1d8..e8894050465 100644 --- a/snapshot/doc/user-guide.md +++ b/snapshot/doc/user-guide.md @@ -41,7 +41,7 @@ Such Storage Class is necessary for restoring a Persistent Volume from already c ## Creating Snapshot Each `VolumeSnapshot` contains a spec and status, which is the specification and status of the Volume Snapshot. ```yaml -apiVersion: volume-snapshot-data.external-storage.k8s.io/v1 +apiVersion: volumesnapshot.external-storage.k8s.io/v1 kind: VolumeSnapshot metadata: name: snapshot-demo @@ -70,7 +70,7 @@ A Volume Snapshot status can be displayed as shown below: $ kubectl get volumesnapshot -o yaml ``` ```yaml -apiVersion: volume-snapshot-data.external-storage.k8s.io/v1 +apiVersion: volumesnapshot.external-storage.k8s.io/v1 kind: VolumeSnapshot metadata: clusterName: "" @@ -81,7 +81,7 @@ apiVersion: volume-snapshot-data.external-storage.k8s.io/v1 name: snapshot-demo namespace: default resourceVersion: "780" - selfLink: /apis/volume-snapshot-data.external-storage.k8s.io/v1/namespaces/default/volumesnapshots/snapshot-demo + selfLink: /apis/volumesnapshot.external-storage.k8s.io/v1/namespaces/default/volumesnapshots/snapshot-demo uid: 9cc5da57-9d42-11e7-9b25-90b11c132b3f spec: persistentVolumeClaimName: pvc-hostpath From 14cb5f4dc01cc09dc5a96b58ad227c40e23c9194 Mon Sep 17 00:00:00 2001 From: Ian Chakeres Date: Thu, 5 Oct 2017 09:53:59 -0700 Subject: [PATCH 28/68] Added recommendation for using ID on block vol --- local-volume/README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/local-volume/README.md b/local-volume/README.md index 14f8f7f599c..dbbeeb48de9 100644 --- a/local-volume/README.md +++ b/local-volume/README.md @@ -245,6 +245,13 @@ go run hack/e2e.go -- -v --test --test_args="--ginkgo.focus=\[Feature:LocalPersi Additionally, this practice will ensure that if another node with the same name is created, that any volumes on that node are unique and not mistaken for a volume on another node with the same name. +* For raw block volumes without a filesystem, use a unique ID as the symlink + name. Depending on your environment, the volume's ID in `/dev/disk/by-id/` + may contain a unique hardware serial number. Otherwise, a unique ID should be + generated. The uniqueness of the symlink name will ensure that if another + node with the same name is created, that any volumes on that node are + unique and not mistaken for a volume on another node with the same name. + ### Deleting/removing the underlying volume From ab9a4d12b9a2a32409ddecf788d51fdbb9a52f28 Mon Sep 17 00:00:00 2001 From: Jing Xu Date: Thu, 28 Sep 2017 13:54:44 -0700 Subject: [PATCH 29/68] Refactor the snapshotter code for more robustness. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR also includes the changes 1. SnapshotCreate function in gce 2. Remove Namespace for VolumeSnapshotData object syncSnapshot is the main controller method to decide what to do with a snapshot object. ``` Step1. [Check snapshot condition] It first checks the status of this snapshot object from its VolumeSnapshotCondition field. - If the condition is Ready or Failed, do nothing - If the condition is Pending, go to step 3 - If there is no condition recorded yet, check the following 1. Check if snapshot object metadata has label of “snapshotMetadataTimeStamp”. If no such label, it indicates that snapshot has not yet triggered, return statusNew which will continues with step 2. 2. 
If the label exist, check if there is already a VolumeSnapshotData object exist that references the given snapshot. If this object exists, update snapshot object to bind the VolumeSnapshotData with it. Return statusPending which continues with step 3 3. If no VolumeSnapshotData object, use the metadata information to construct tag and query the cloud whether a snapshot is already taken with the tag. If such snapshot can be found, construct the VolumeSnapshotData object and bind it with VolumeSnapshot object. Return statusPending which continues with step 3. 4. If no snapshot is found, return statusNew which will continue with step 2. Step2. [Take snapshot and Create SnapshotData object to bind] 1. **getPVFromVolumeSnapshot**: From snapshot data object, get PVC name and then retrieve pv object from API server. If failed, return error. 2. **updateVolumeSnapshotMetadata**: Construct tag information with current timestamp and VolumeSnapshot object’s metadata (namespace, name, and uid). Also update the VolumeSnapshot object with the timestamp and pv name (which will be used later). If update failed, return error. 3. **takeSnapshot**: call cloud provider to take the snapshot and attach the tags to the snapshot constructed from above. If it failed, return the error. Otherwise it returns the snapshot data source information and condition. 4. **createVolumeSnapshotData**: The name of the VolumeSnapshotData object is constructed from the UID of the VolumeSnapshot for easy query at later point. If object is constructed and updated to API server successfully, move to the next step. Otherwise, return error. 5. **bindandUpdateVolumeSnapshot**: It updates the VolumeSnapshot object with the VolumeSnapshotData object name and new status. Step3. [Wait until snapshot is ready or failed] This step creates a loop with exponential backoff time. Inside the loop 1. It queries the cloud provider to describe the snapshot with the status information. 2. It updates the VolumeSnapshot object with the new condition if there is any changes compared with the old condition 3. If the latest status is ready or error, break the loop and return. ``` --- .../snapshot-pv-provisioner.go | 1 - snapshot/pkg/apis/crd/v1/types.go | 2 +- snapshot/pkg/client/client.go | 4 +- .../pkg/cloudprovider/providers/gce/gce.go | 31 +- .../pkg/controller/reconciler/reconciler.go | 23 - .../pkg/controller/snapshotter/snapshotter.go | 554 ++++++++---------- .../snapshotter/snapshotter_test.go | 2 +- snapshot/pkg/volume/awsebs/processor.go | 2 +- snapshot/pkg/volume/cinder/processor.go | 2 +- snapshot/pkg/volume/gcepd/processor.go | 82 ++- 10 files changed, 346 insertions(+), 357 deletions(-) diff --git a/snapshot/cmd/snapshot-pv-provisioner/snapshot-pv-provisioner.go b/snapshot/cmd/snapshot-pv-provisioner/snapshot-pv-provisioner.go index 0a43e88d56d..b0d18d10b6a 100644 --- a/snapshot/cmd/snapshot-pv-provisioner/snapshot-pv-provisioner.go +++ b/snapshot/cmd/snapshot-pv-provisioner/snapshot-pv-provisioner.go @@ -137,7 +137,6 @@ func (p *snapshotProvisioner) Provision(options controller.VolumeOptions) (*v1.P var snapshotData crdv1.VolumeSnapshotData err = p.crdclient.Get(). Resource(crdv1.VolumeSnapshotDataResourcePlural). - Namespace(v1.NamespaceDefault). Name(snapshot.Spec.SnapshotDataName). 
Do().Into(&snapshotData) diff --git a/snapshot/pkg/apis/crd/v1/types.go b/snapshot/pkg/apis/crd/v1/types.go index 5d96282c010..8431d2761f8 100644 --- a/snapshot/pkg/apis/crd/v1/types.go +++ b/snapshot/pkg/apis/crd/v1/types.go @@ -37,7 +37,7 @@ type VolumeSnapshotStatus struct { // +optional CreationTimestamp metav1.Time `json:"creationTimestamp" protobuf:"bytes,1,opt,name=creationTimestamp"` - // Representes the latest available observations about the volume snapshot + // Represent the latest available observations about the volume snapshot Conditions []VolumeSnapshotCondition `json:"conditions" protobuf:"bytes,2,rep,name=conditions"` } diff --git a/snapshot/pkg/client/client.go b/snapshot/pkg/client/client.go index 5036efbb43b..d9c84ec1e83 100644 --- a/snapshot/pkg/client/client.go +++ b/snapshot/pkg/client/client.go @@ -22,7 +22,6 @@ import ( "github.com/golang/glog" crdv1 "github.com/kubernetes-incubator/external-storage/snapshot/pkg/apis/crd/v1" - apiv1 "k8s.io/api/core/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -107,7 +106,8 @@ func CreateCRD(clientset apiextensionsclient.Interface) error { // WaitForSnapshotResource waits for the snapshot resource func WaitForSnapshotResource(snapshotClient *rest.RESTClient) error { return wait.Poll(100*time.Millisecond, 60*time.Second, func() (bool, error) { - _, err := snapshotClient.Get().Namespace(apiv1.NamespaceDefault).Resource(crdv1.VolumeSnapshotDataResourcePlural).DoRaw() + _, err := snapshotClient.Get(). + Resource(crdv1.VolumeSnapshotDataResourcePlural).DoRaw() if err == nil { return true, nil } diff --git a/snapshot/pkg/cloudprovider/providers/gce/gce.go b/snapshot/pkg/cloudprovider/providers/gce/gce.go index c4e0d897ab6..26d672cb732 100644 --- a/snapshot/pkg/cloudprovider/providers/gce/gce.go +++ b/snapshot/pkg/cloudprovider/providers/gce/gce.go @@ -2812,7 +2812,7 @@ func (gce *Cloud) CreateDiskFromSnapshot(snapshot string, tagsStr, err := gce.encodeDiskTags(tags) if err != nil { - return err + return fmt.Errorf("encode disk tag error %v", err) } switch diskType { @@ -2833,11 +2833,15 @@ func (gce *Cloud) CreateDiskFromSnapshot(snapshot string, Type: diskTypeURI, SourceSnapshot: snapshotName, } - + glog.Infof("Create disk from snapshot diskToCreate %+v", diskToCreate) createOp, err := gce.service.Disks.Insert(gce.projectID, zone, diskToCreate).Do() - if isGCEError(err, "alreadyExists") { - glog.Warningf("GCE PD %q already exists, reusing", name) - return nil + glog.Infof("Create disk from snapshot operation %v, err %v", createOp, err) + if err != nil { + if isGCEError(err, "alreadyExists") { + glog.Warningf("GCE PD %q already exists, reusing", name) + return nil + } + return err } err = gce.waitForZoneOp(createOp, zone) @@ -2856,7 +2860,7 @@ func (gce *Cloud) DescribeSnapshot(snapshotToGet string) (status string, isCompl } //no snapshot is found if snapshot == nil { - return "", false, fmt.Errorf("snapshot %s is found", snapshotToGet) + return "", false, fmt.Errorf("snapshot %s is not found", snapshotToGet) } if snapshot.Status == "READY" { return snapshot.Status, true, nil @@ -2906,7 +2910,6 @@ func (gce *Cloud) getSnapshotByName(snapshotName string) (*gceSnapshot, error) { } // CreateSnapshot creates a snapshot -// TODO: CreateSnapshot should return snapshot status func (gce *Cloud) CreateSnapshot(diskName string, zone string, snapshotName string, tags 
map[string]string) error { isManaged := false for _, managedZone := range gce.managedZones { @@ -2920,6 +2923,7 @@ func (gce *Cloud) CreateSnapshot(diskName string, zone string, snapshotName stri } tagsStr, err := gce.encodeDiskTags(tags) if err != nil { + glog.Infof("CreateSnapshot err %v", err) return err } @@ -2927,19 +2931,10 @@ func (gce *Cloud) CreateSnapshot(diskName string, zone string, snapshotName stri Name: snapshotName, Description: tagsStr, } - + glog.V(4).Infof("Create snapshot project %s, zone %s, diskName %s, snapshotToCreate %+v", gce.projectID, zone, diskName, snapshotToCreate) createOp, err := gce.service.Disks.CreateSnapshot(gce.projectID, zone, diskName, snapshotToCreate).Do() - if err != nil { - return err - } - - err = gce.waitForZoneOp(createOp, zone) - if isGCEError(err, "alreadyExists") { - glog.Warningf("GCE PD Snapshot %q already exists, reusing", snapshotName) - return nil - } + glog.V(4).Infof("Create snapshot operation %v", createOp) return err - } func (gce *Cloud) listClustersInZone(zone string) ([]string, error) { diff --git a/snapshot/pkg/controller/reconciler/reconciler.go b/snapshot/pkg/controller/reconciler/reconciler.go index eeecf5957f4..a53bd205933 100644 --- a/snapshot/pkg/controller/reconciler/reconciler.go +++ b/snapshot/pkg/controller/reconciler/reconciler.go @@ -23,11 +23,8 @@ import ( "time" "github.com/golang/glog" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" - crdv1 "github.com/kubernetes-incubator/external-storage/snapshot/pkg/apis/crd/v1" "github.com/kubernetes-incubator/external-storage/snapshot/pkg/controller/cache" "github.com/kubernetes-incubator/external-storage/snapshot/pkg/controller/snapshotter" ) @@ -128,26 +125,6 @@ func (rc *reconciler) reconcile() { // It's likely that the operation exists already: it should be fired by the controller right // after the event has arrived. rc.snapshotter.CreateVolumeSnapshot(snapshot) - } else { - // The VolumeSnapshot is in both states of world: check its OK and fix it eventually. - aswSnapshot := rc.actualStateOfWorld.GetSnapshot(name) - if aswSnapshot.Spec.SnapshotDataName == "" { - // Seems the write to API server failed before. 
Find the SnapshotData and fix - // this VolumeSnapshot reference and status - glog.Infof("Volume snapshot %s is missing the snapshot data name - updating.", name) - snapConditions := []crdv1.VolumeSnapshotCondition{ - { - Type: crdv1.VolumeSnapshotConditionReady, - Status: v1.ConditionTrue, - Message: "Snapshot created succsessfully", - LastTransitionTime: metav1.Now(), - }, - } - _, err := rc.snapshotter.UpdateVolumeSnapshot(name, &snapConditions) - if err != nil { - glog.Errorf("Error updating VolumeSnapshot %s: %v", name, err) - } - } } } } diff --git a/snapshot/pkg/controller/snapshotter/snapshotter.go b/snapshot/pkg/controller/snapshotter/snapshotter.go index dbeefcc053e..400f3b2eb3e 100644 --- a/snapshot/pkg/controller/snapshotter/snapshotter.go +++ b/snapshot/pkg/controller/snapshotter/snapshotter.go @@ -22,6 +22,9 @@ import ( "github.com/golang/glog" + crdv1 "github.com/kubernetes-incubator/external-storage/snapshot/pkg/apis/crd/v1" + "github.com/kubernetes-incubator/external-storage/snapshot/pkg/controller/cache" + "github.com/kubernetes-incubator/external-storage/snapshot/pkg/volume" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -31,22 +34,22 @@ import ( "k8s.io/client-go/rest" "k8s.io/kubernetes/pkg/util/goroutinemap" "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff" - - crdv1 "github.com/kubernetes-incubator/external-storage/snapshot/pkg/apis/crd/v1" - "github.com/kubernetes-incubator/external-storage/snapshot/pkg/controller/cache" - "github.com/kubernetes-incubator/external-storage/snapshot/pkg/volume" ) const ( + snapshotMetadataTimeStamp = "SnapshotMetadata-Timestamp" + snapshotMetadataPVName = "SnapshotMetadata-PVName" + snapshotDataNamePrefix = "k8s-volume-snapshot" + pvNameLabel = "pvName" defaultExponentialBackOffOnError = true // volumeSnapshot* is configuration of exponential backoff for - // waiting for snapshot operation to complete. Starting with 10 - // seconds, multiplying by 1.2 with each step and taking 15 steps at maximum. - // It will time out after 12.00 minutes. - volumeSnapshotInitialDelay = 10 * time.Second - volumeSnapshotFactor = 1.2 - volumeSnapshotSteps = 15 + // waiting for snapshot operation to complete. Starting with 2 + // seconds, multiplying by 1.5 with each step and taking 20 steps at maximum. + // It will time out after 20 steps (6650 seconds). 
+ volumeSnapshotInitialDelay = 2 * time.Second + volumeSnapshotFactor = 1.5 + volumeSnapshotSteps = 20 ) // VolumeSnapshotter does the "heavy lifting": it spawns goroutines that talk to the @@ -57,8 +60,8 @@ type VolumeSnapshotter interface { CreateVolumeSnapshot(snapshot *crdv1.VolumeSnapshot) DeleteVolumeSnapshot(snapshot *crdv1.VolumeSnapshot) PromoteVolumeSnapshotToPV(snapshot *crdv1.VolumeSnapshot) - UpdateVolumeSnapshot(snapshotName string, status *[]crdv1.VolumeSnapshotCondition) (*crdv1.VolumeSnapshot, error) - UpdateVolumeSnapshotData(snapshotDataName string, status *[]crdv1.VolumeSnapshotDataCondition) error + //UpdateVolumeSnapshot(snapshotName string, status *[]crdv1.VolumeSnapshotCondition) (*crdv1.VolumeSnapshot, error) + //UpdateVolumeSnapshotData(snapshotDataName string, status *[]crdv1.VolumeSnapshotDataCondition) error } type volumeSnapshotter struct { @@ -110,16 +113,15 @@ func NewVolumeSnapshotter( } } -// TODO(xyang): Cache PV volume information into meta data to avoid query api server // Helper function to get PV from VolumeSnapshot -func (vs *volumeSnapshotter) getPVFromVolumeSnapshot(snapshotName string, snapshot *crdv1.VolumeSnapshot) (*v1.PersistentVolume, error) { +func (vs *volumeSnapshotter) getPVFromVolumeSnapshot(uniqueSnapshotName string, snapshot *crdv1.VolumeSnapshot) (*v1.PersistentVolume, error) { pvcName := snapshot.Spec.PersistentVolumeClaimName if pvcName == "" { - return nil, fmt.Errorf("The PVC name is not specified in snapshot %s", snapshotName) + return nil, fmt.Errorf("The PVC name is not specified in snapshot %s", uniqueSnapshotName) } - snapNameSpace, _, err := cache.GetNameAndNameSpaceFromSnapshotName(snapshotName) + snapNameSpace, _, err := cache.GetNameAndNameSpaceFromSnapshotName(uniqueSnapshotName) if err != nil { - return nil, fmt.Errorf("Snapshot %s is malformed", snapshotName) + return nil, fmt.Errorf("Snapshot %s is malformed", uniqueSnapshotName) } pvc, err := vs.coreClient.CoreV1().PersistentVolumeClaims(snapNameSpace).Get(pvcName, metav1.GetOptions{}) if err != nil { @@ -137,15 +139,16 @@ func (vs *volumeSnapshotter) getPVFromVolumeSnapshot(snapshotName string, snapsh return pv, nil } +// TODO: cache the VolumeSnapshotData list since this is only needed when controller restarts, checks +// whether there is existing VolumeSnapshotData refers to the snapshot already. // Helper function that looks up VolumeSnapshotData for a VolumeSnapshot named snapshotName -func (vs *volumeSnapshotter) getSnapshotDataFromSnapshotName(snapshotName string) *crdv1.VolumeSnapshotData { +func (vs *volumeSnapshotter) getSnapshotDataFromSnapshotName(uniqueSnapshotName string) *crdv1.VolumeSnapshotData { var snapshotDataList crdv1.VolumeSnapshotDataList var snapshotDataObj crdv1.VolumeSnapshotData var found bool err := vs.restClient.Get(). Resource(crdv1.VolumeSnapshotDataResourcePlural). - Namespace(v1.NamespaceDefault). 
Do().Into(&snapshotDataList) if err != nil { glog.Errorf("Error retrieving the VolumeSnapshotData objects from API server: %v", err) @@ -158,7 +161,7 @@ func (vs *volumeSnapshotter) getSnapshotDataFromSnapshotName(snapshotName string for _, snapData := range snapshotDataList.Items { if snapData.Spec.VolumeSnapshotRef != nil { name := snapData.Spec.VolumeSnapshotRef.Namespace + "/" + snapData.Spec.VolumeSnapshotRef.Name - if name == snapshotName || snapData.Spec.VolumeSnapshotRef.Name == snapshotName { + if name == uniqueSnapshotName || snapData.Spec.VolumeSnapshotRef.Name == uniqueSnapshotName { snapshotDataObj = snapData found = true break @@ -166,30 +169,37 @@ func (vs *volumeSnapshotter) getSnapshotDataFromSnapshotName(snapshotName string } } if !found { - glog.Errorf("Error: no VolumeSnapshotData for VolumeSnapshot %s found", snapshotName) + glog.V(4).Infof("Error: no VolumeSnapshotData for VolumeSnapshot %s found", uniqueSnapshotName) return nil } return &snapshotDataObj } -// Query status of the snapshot from plugin and update the status of VolumeSnapshot and VolumeSnapshotData -// if needed. Finish waiting when the snapshot becomes available/ready or error. -func (vs *volumeSnapshotter) waitForSnapshot(snapshotName string, snapshot *crdv1.VolumeSnapshot, snapshotDataObj *crdv1.VolumeSnapshotData) error { - // Get a fresh VolumeSnapshot from API - var snapshotObj crdv1.VolumeSnapshot +// Helper function that looks up VolumeSnapshotData from a VolumeSnapshot +func (vs *volumeSnapshotter) getSnapshotDataFromSnapshot(snapshot *crdv1.VolumeSnapshot) (*crdv1.VolumeSnapshotData, error) { + var snapshotDataObj crdv1.VolumeSnapshotData + snapshotDataName := snapshot.Spec.SnapshotDataName + if snapshotDataName == "" { + return nil, fmt.Errorf("Could not find snapshot data object: SnapshotDataName in snapshot spec is empty") + } err := vs.restClient.Get(). - Name(snapshot.Metadata.Name). - Resource(crdv1.VolumeSnapshotResourcePlural). - Namespace(snapshot.Metadata.Namespace). - Do().Into(&snapshotObj) + Name(snapshotDataName). + Resource(crdv1.VolumeSnapshotDataResourcePlural). + Do().Into(&snapshotDataObj) if err != nil { - return fmt.Errorf("Error getting snapshot object %s from API server: %v", snapshotName, err) + glog.Errorf("Error retrieving the VolumeSnapshotData objects from API server: %v", err) + return nil, fmt.Errorf("Could not get snapshot data object %s: %v", snapshotDataName, err) } - snapshotDataName := snapshotObj.Spec.SnapshotDataName - glog.Infof("In waitForSnapshot: snapshot %s snapshot data %s", snapshotName, snapshotDataName) + return &snapshotDataObj, nil +} + +// Query status of the snapshot from plugin and update the status of VolumeSnapshot and VolumeSnapshotData +// if needed. Finish waiting when the snapshot becomes available/ready or error. 
+func (vs *volumeSnapshotter) waitForSnapshot(uniqueSnapshotName string, snapshotObj *crdv1.VolumeSnapshot, snapshotDataObj *crdv1.VolumeSnapshotData) error { + glog.Infof("In waitForSnapshot: snapshot %s snapshot data %s", uniqueSnapshotName, snapshotObj.Spec.SnapshotDataName) if snapshotDataObj == nil { - return fmt.Errorf("Failed to update VolumeSnapshot for snapshot %s: no VolumeSnapshotData", snapshotName) + return fmt.Errorf("Failed to update VolumeSnapshot for snapshot %s: no VolumeSnapshotData", uniqueSnapshotName) } spec := &snapshotDataObj.Spec @@ -202,9 +212,6 @@ func (vs *volumeSnapshotter) waitForSnapshot(snapshotName string, snapshot *crdv return fmt.Errorf("%s is not supported volume for %#v", volumeType, spec) } - var newSnapshot *crdv1.VolumeSnapshot - var conditions *[]crdv1.VolumeSnapshotCondition - var status, newstatus string = "", "" backoff := wait.Backoff{ Duration: volumeSnapshotInitialDelay, Factor: volumeSnapshotFactor, @@ -212,61 +219,30 @@ func (vs *volumeSnapshotter) waitForSnapshot(snapshotName string, snapshot *crdv } // Wait until the snapshot is successfully created by the plugin or an error occurs that // fails the snapshot creation. - err = wait.ExponentialBackoff(backoff, func() (bool, error) { - conditions, _, err = plugin.DescribeSnapshot(snapshotDataObj) + err := wait.ExponentialBackoff(backoff, func() (bool, error) { + conditions, _, err := plugin.DescribeSnapshot(snapshotDataObj) if err != nil { - glog.Warningf("failed to get snapshot %v, err: %v", snapshotName, err) + glog.Warningf("failed to get snapshot %v, err: %v", uniqueSnapshotName, err) //continue waiting return false, nil } - newstatus = vs.getSimplifiedSnapshotStatus(conditions) - if newstatus == statusPending { - if newstatus != status { - status = newstatus - glog.V(5).Infof("Snapshot %s creation is not complete yet. 
Status: [%#v] Retrying...", snapshotName, conditions) - // UpdateVolmeSnapshot status - newSnapshot, err = vs.UpdateVolumeSnapshot(snapshotName, conditions) - if err != nil { - glog.Errorf("Error updating volume snapshot %s: %v", snapshotName, err) - return true, fmt.Errorf("Failed to update VolumeSnapshot for snapshot %s: %v", snapshotName, err) - } - } - // continue waiting - return false, nil - } else if newstatus == statusError { - return true, fmt.Errorf("Status for snapshot %s is error", snapshotName) - } - glog.Infof("waitForSnapshot: Snapshot %s creation is complete: %#v", snapshotName, conditions) - - newSnapshot, err = vs.UpdateVolumeSnapshot(snapshotName, conditions) - if err != nil { - glog.Errorf("Error updating volume snapshot %s: %v", snapshotName, err) - return true, fmt.Errorf("Failed to update VolumeSnapshot for snapshot %s: %v", snapshotName, err) - } - - ind := len(*conditions) - 1 - snapDataConditions := []crdv1.VolumeSnapshotDataCondition{ - { - Type: (crdv1.VolumeSnapshotDataConditionType)((*conditions)[ind].Type), - Status: (*conditions)[ind].Status, - Message: (*conditions)[ind].Message, - LastTransitionTime: metav1.Now(), - }, - } - // Update VolumeSnapshotData status - err = vs.UpdateVolumeSnapshotData(snapshotDataName, &snapDataConditions) + newstatus := vs.getSimplifiedSnapshotStatus(*conditions) + condition := *conditions + lastCondition := condition[len(condition)-1] + newSnapshot, err := vs.UpdateVolumeSnapshotStatus(snapshotObj, &lastCondition) if err != nil { - return true, fmt.Errorf("Error update snapshotData object %s: %v", snapshotName, err) + glog.Errorf("Error updating volume snapshot %s: %v", uniqueSnapshotName, err) } if newstatus == statusReady { - glog.Infof("waitForSnapshot: Snapshot %s created successfully. Adding it to Actual State of World.", snapshotName) + glog.Infof("waitForSnapshot: Snapshot %s created successfully. Adding it to Actual State of World.", uniqueSnapshotName) vs.actualStateOfWorld.AddSnapshot(newSnapshot) // Break out of the for loop return true, nil } else if newstatus == statusError { - return true, fmt.Errorf("Failed to create snapshot %s", snapshotName) + glog.Errorf("waitForSnapshot: Snapshot %s returns error", uniqueSnapshotName) + return true, fmt.Errorf("Failed to create snapshot %s", uniqueSnapshotName) } return false, nil }) @@ -287,12 +263,12 @@ func (vs *volumeSnapshotter) takeSnapshot(pv *v1.PersistentVolume, tags *map[str return nil, nil, fmt.Errorf("%s is not supported volume for %#v", volumeType, spec) } - snap, snapConditions, err := plugin.SnapshotCreate(pv, tags) + snapDataSource, snapConditions, err := plugin.SnapshotCreate(pv, tags) if err != nil { glog.Warningf("failed to snapshot %#v, err: %v", spec, err) } else { - glog.Infof("snapshot created: %v. Conditions: %#v", snap, snapConditions) - return snap, snapConditions, nil + glog.Infof("snapshot created: %v. 
Conditions: %#v", snapDataSource, snapConditions) + return snapDataSource, snapConditions, nil } return nil, nil, nil } @@ -318,35 +294,35 @@ func (vs *volumeSnapshotter) deleteSnapshot(spec *crdv1.VolumeSnapshotDataSpec) return nil } -func (vs *volumeSnapshotter) getSimplifiedSnapshotStatus(conditions *[]crdv1.VolumeSnapshotCondition) string { +func (vs *volumeSnapshotter) getSimplifiedSnapshotStatus(conditions []crdv1.VolumeSnapshotCondition) string { if conditions == nil { - glog.Errorf("Invalid input conditions for snapshot.") - return statusError + glog.Errorf("No conditions for this snapshot yet.") + return statusNew } - index := len(*conditions) - 1 - if len(*conditions) > 0 && - ((*conditions)[index].Type == crdv1.VolumeSnapshotConditionReady && - (*conditions)[index].Status == v1.ConditionTrue) { + if len(conditions) == 0 { + glog.Errorf("Empty condition.") + return statusNew + } + //index := len(conditions) - 1 + lastCondition := conditions[len(conditions)-1] + if lastCondition.Type == crdv1.VolumeSnapshotConditionReady && lastCondition.Status == v1.ConditionTrue { return statusReady - } else if len(*conditions) > 0 && - (*conditions)[index].Type == crdv1.VolumeSnapshotConditionError { + } else if lastCondition.Type == crdv1.VolumeSnapshotConditionError { return statusError - } else if len(*conditions) > 0 && - (*conditions)[index].Type == crdv1.VolumeSnapshotConditionPending && - ((*conditions)[index].Status == v1.ConditionTrue || - (*conditions)[index].Status == v1.ConditionUnknown) { + } else if lastCondition.Type == crdv1.VolumeSnapshotConditionPending && + (lastCondition.Status == v1.ConditionTrue || lastCondition.Status == v1.ConditionUnknown) { return statusPending } return statusNew } func (vs *volumeSnapshotter) findVolumeSnapshotMetadata(snapshot *crdv1.VolumeSnapshot) *map[string]string { - var tags *map[string]string + var tags map[string]string if snapshot.Metadata.Name != "" && snapshot.Metadata.Namespace != "" && snapshot.Metadata.UID != "" { if snapshot.Metadata.Labels != nil { - timestamp, ok := snapshot.Metadata.Labels["Timestamp"] + timestamp, ok := snapshot.Metadata.Labels[snapshotMetadataTimeStamp] if ok { - tags := make(map[string]string) + tags = make(map[string]string) tags[CloudSnapshotCreatedForVolumeSnapshotNamespaceTag] = snapshot.Metadata.Namespace tags[CloudSnapshotCreatedForVolumeSnapshotNameTag] = snapshot.Metadata.Name tags[CloudSnapshotCreatedForVolumeSnapshotUIDTag] = fmt.Sprintf("%v", snapshot.Metadata.UID) @@ -355,11 +331,11 @@ func (vs *volumeSnapshotter) findVolumeSnapshotMetadata(snapshot *crdv1.VolumeSn } } } - return tags + return &tags } -func (vs *volumeSnapshotter) getPlugin(snapshotName string, snapshot *crdv1.VolumeSnapshot) (*volume.Plugin, error) { - pv, err := vs.getPVFromVolumeSnapshot(snapshotName, snapshot) +func (vs *volumeSnapshotter) getPlugin(uniqueSnapshotName string, snapshot *crdv1.VolumeSnapshot) (*volume.Plugin, error) { + pv, err := vs.getPVFromVolumeSnapshot(uniqueSnapshotName, snapshot) if err != nil { return nil, err } @@ -375,169 +351,196 @@ func (vs *volumeSnapshotter) getPlugin(snapshotName string, snapshot *crdv1.Volu return &plugin, nil } -func (vs *volumeSnapshotter) updateSnapshotIfExists(snapshot *crdv1.VolumeSnapshot) (string, *crdv1.VolumeSnapshotData, error) { - var snapshotName string = snapshot.Metadata.Name +// Exame the given snapshot in detail and then return the status +func (vs *volumeSnapshotter) updateSnapshotIfExists(uniqueSnapshotName string, snapshot *crdv1.VolumeSnapshot) (string, 
*crdv1.VolumeSnapshot, error) { + snapshotName := snapshot.Metadata.Name var snapshotDataObj *crdv1.VolumeSnapshotData var snapshotDataSource *crdv1.VolumeSnapshotDataSource var conditions *[]crdv1.VolumeSnapshotCondition var err error - // Find snapshot by existing tags, and create VolumeSnapshotData - snapshotDataSource, conditions, err = vs.findSnapshot(snapshotName, snapshot) + + tags := vs.findVolumeSnapshotMetadata(snapshot) + // If there is no tag returned, snapshotting is not triggered yet, return new state + if tags == nil { + glog.Infof("No tag can be found in snapshot metadata %s", uniqueSnapshotName) + return statusNew, snapshot, nil + } + // Check whether snapshotData object is already created or not. If yes, snapshot is already + // triggered through cloud provider, bind it and return pending state + if snapshotDataObj = vs.getSnapshotDataFromSnapshotName(uniqueSnapshotName); snapshotDataObj != nil { + glog.Infof("Find snapshot data object %s from snapshot %s", snapshotDataObj.Metadata.Name, uniqueSnapshotName) + snapshotObj, err := vs.bindandUpdateVolumeSnapshot(uniqueSnapshotName, snapshotDataObj.Metadata.Name, nil) + if err != nil { + return statusError, snapshot, err + } + return statusPending, snapshotObj, nil + } + // Find snapshot through cloud provider by existing tags, and create VolumeSnapshotData if such snapshot is found + snapshotDataSource, conditions, err = vs.findSnapshotByTags(snapshotName, snapshot) if err != nil { - return statusNew, nil, nil + return statusNew, snapshot, nil + } + // Snapshot is found. Create VolumeSnapshotData, bind VolumeSnapshotData to VolumeSnapshot, and update VolumeSnapshot status + glog.Infof("updateSnapshotIfExists: create VolumeSnapshotData object for VolumeSnapshot %s.", uniqueSnapshotName) + pvName, ok := snapshot.Metadata.Labels[pvNameLabel] + if !ok { + return statusError, snapshot, fmt.Errorf("Could not find pv name from snapshot, this should not happen.") } - // Snapshot is found. Since the status is New, it suggests that - // VolumeSnapshotData has not been created yet. So create VolumeSnapshotData, - // bind VolumeSnapshotData to VolumeSnapshot, and update VolumeSnapshot status - glog.Infof("updateSnapshotIfExists: create VolumeSnapshotData object for VolumeSnapshot %s.", snapshotName) - snapshotDataObj, err = vs.createVolumeSnapshotData(snapshotName, snapshot, snapshotDataSource, conditions) + snapshotDataObj, err = vs.createVolumeSnapshotData(snapshotName, pvName, snapshotDataSource, conditions) if err != nil { - return statusError, nil, err + return statusError, snapshot, err } - glog.Infof("updateSnapshotIfExists: update VolumeSnapshot status and bind VolumeSnapshotData to VolumeSnapshot %s.", snapshotName) - _, err = vs.UpdateVolumeSnapshot(snapshotName, conditions) + glog.Infof("updateSnapshotIfExists: update VolumeSnapshot status and bind VolumeSnapshotData to VolumeSnapshot %s.", uniqueSnapshotName) + snapshotObj, err := vs.bindandUpdateVolumeSnapshot(uniqueSnapshotName, snapshotDataObj.Metadata.Name, conditions) if err != nil { - glog.Errorf("updateSnapshotIfExists: Error updating volume snapshot %s: %v", snapshotName, err) return statusError, nil, err } - - return statusPending, snapshotDataObj, nil + return statusPending, snapshotObj, nil } // Below are the closures meant to build the functions for the GoRoutineMap operations. // syncSnapshot is the main controller method to decide what to do to create a snapshot. 
-func (vs *volumeSnapshotter) syncSnapshot(snapshotName string, snapshot *crdv1.VolumeSnapshot) func() error { +func (vs *volumeSnapshotter) syncSnapshot(uniqueSnapshotName string, snapshot *crdv1.VolumeSnapshot) func() error { return func() error { + snapshotObj := snapshot + status := vs.getSimplifiedSnapshotStatus(snapshot.Status.Conditions) var err error - var snapshotDataObj *crdv1.VolumeSnapshotData - status := vs.getSimplifiedSnapshotStatus(&snapshot.Status.Conditions) + // When the condition is new, it is still possible that snapshot is already triggered but has not yet updated the condition. + // Check the metadata and avaiable VolumeSnapshotData objects and update the snapshot accordingly if status == statusNew { - status, snapshotDataObj, err = vs.updateSnapshotIfExists(snapshot) - } else if status == statusPending { - // If we are here, takeSnapshot has already happened and - // VolumeSnapshotData object is already created - snapshotDataObj = vs.getSnapshotDataFromSnapshotName(snapshotName) - if snapshotDataObj == nil { - return fmt.Errorf("VolumeSnapshotData not found for snapshot %s", snapshotName) + status, snapshotObj, err = vs.updateSnapshotIfExists(uniqueSnapshotName, snapshot) + if err != nil { + glog.Errorf("updateSnapshotIfExists has error %v", err) } } switch status { case statusReady: + glog.Infof("Snapshot %s created successfully. Adding it to Actual State of World.", uniqueSnapshotName) + vs.actualStateOfWorld.AddSnapshot(snapshot) return nil case statusError: - glog.Infof("syncSnapshot: Error creating snapshot %s.", snapshotName) - return fmt.Errorf("Error creating snapshot %s", snapshotName) + glog.Infof("syncSnapshot: Error creating snapshot %s.", uniqueSnapshotName) + return fmt.Errorf("Error creating snapshot %s", uniqueSnapshotName) case statusPending: - glog.Infof("syncSnapshot: Snapshot %s is Pending.", snapshotName) + glog.V(4).Infof("syncSnapshot: Snapshot %s is Pending.", uniqueSnapshotName) // Query the volume plugin for the status of the snapshot with snapshot id // from VolumeSnapshotData object. - // Add snapshot to Actual State of World when snapshot is Ready. 
- err = vs.waitForSnapshot(snapshotName, snapshot, snapshotDataObj) + snapshotDataObj, err := vs.getSnapshotDataFromSnapshot(snapshotObj) + if err != nil { + return fmt.Errorf("Failed to find snapshot %v", err) + } + err = vs.waitForSnapshot(uniqueSnapshotName, snapshotObj, snapshotDataObj) if err != nil { - return fmt.Errorf("Failed to create snapshot %s", snapshotName) + return fmt.Errorf("Failed to check snapshot state %s with error %v", uniqueSnapshotName, err) } - glog.Infof("syncSnapshot: Snapshot %s created successfully.", snapshotName) + glog.Infof("syncSnapshot: Snapshot %s created successfully.", uniqueSnapshotName) return nil case statusNew: - glog.Infof("syncSnapshot: Creating snapshot %s ...", snapshotName) - ret := vs.createSnapshot(snapshotName, snapshot) - return ret + glog.Infof("syncSnapshot: Creating snapshot %s ...", uniqueSnapshotName) + err = vs.createSnapshot(uniqueSnapshotName, snapshotObj) + return err } - return fmt.Errorf("Error occurred when creating snapshot %s", snapshotName) + return fmt.Errorf("Error occurred when creating snapshot %s, unknown status %s", uniqueSnapshotName, status) } } -func (vs *volumeSnapshotter) findSnapshot(snapshotName string, snapshot *crdv1.VolumeSnapshot) (*crdv1.VolumeSnapshotDataSource, *[]crdv1.VolumeSnapshotCondition, error) { - glog.Infof("findSnapshot: snapshot %s", snapshotName) +func (vs *volumeSnapshotter) findSnapshotByTags(uniqueSnapshotName string, snapshot *crdv1.VolumeSnapshot) (*crdv1.VolumeSnapshotDataSource, *[]crdv1.VolumeSnapshotCondition, error) { + glog.Infof("findSnapshot: snapshot %s", uniqueSnapshotName) var snapshotDataSource *crdv1.VolumeSnapshotDataSource var conditions *[]crdv1.VolumeSnapshotCondition tags := vs.findVolumeSnapshotMetadata(snapshot) if tags != nil { - plugin, err := vs.getPlugin(snapshotName, snapshot) + plugin, err := vs.getPlugin(uniqueSnapshotName, snapshot) if plugin == nil { glog.Errorf("Failed to get volume plugin. %v", err) - return nil, nil, fmt.Errorf("Failed to get volume plugin to create snapshot %s", snapshotName) + return nil, nil, fmt.Errorf("Failed to get volume plugin to create snapshot %s", uniqueSnapshotName) } // Check whether the real snapshot is already created by the plugin - glog.Infof("findSnapshot: find snapshot %s by tags %v.", snapshotName, tags) + glog.Infof("findSnapshot: find snapshot %s by tags %v.", uniqueSnapshotName, tags) snapshotDataSource, conditions, err = (*plugin).FindSnapshot(tags) if err == nil { - glog.Infof("findSnapshot: found snapshot %s.", snapshotName) + glog.Infof("findSnapshot: found snapshot %s.", uniqueSnapshotName) return snapshotDataSource, conditions, nil } return nil, nil, err } - return nil, nil, fmt.Errorf("No metadata found in snapshot %s", snapshotName) + return nil, nil, fmt.Errorf("No metadata found in snapshot %s", uniqueSnapshotName) } -func (vs *volumeSnapshotter) createSnapshot(snapshotName string, snapshot *crdv1.VolumeSnapshot) error { +// The function goes through the whole snapshot creation process. +// 1. Update VolumeSnapshot metadata to include the snapshotted PV name, timestamp and snapshot uid, also generate tag for cloud provider +// 2. Trigger the snapshot through cloud provider and attach the tag to the snapshot. +// 3. Create the VolumeSnapshotData object with the snapshot id information returned from step 2. +// 4. Bind the VolumeSnapshot and VolumeSnapshotData object +// 5. Query the snapshot status through cloud provider and update the status until snapshot is ready or fails. 
+func (vs *volumeSnapshotter) createSnapshot(uniqueSnapshotName string, snapshot *crdv1.VolumeSnapshot) error { var snapshotDataSource *crdv1.VolumeSnapshotDataSource var snapStatus *[]crdv1.VolumeSnapshotCondition var err error var tags *map[string]string - glog.Infof("createSnapshot: Create metadata for snapshot %s.", snapshotName) - tags, err = vs.updateVolumeSnapshotMetadata(snapshot) + glog.Infof("createSnapshot: Creating snapshot %s through the plugin ...", uniqueSnapshotName) + pv, err := vs.getPVFromVolumeSnapshot(uniqueSnapshotName, snapshot) if err != nil { - return fmt.Errorf("Failed to update metadata for volume snapshot %s: %q", snapshotName, err) + return err } - glog.Infof("createSnapshot: Creating snapshot %s through the plugin ...", snapshotName) - pv, err := vs.getPVFromVolumeSnapshot(snapshotName, snapshot) + glog.Infof("createSnapshot: Creating metadata for snapshot %s.", uniqueSnapshotName) + tags, err = vs.updateVolumeSnapshotMetadata(snapshot, pv.Name) if err != nil { - return err + return fmt.Errorf("Failed to update metadata for volume snapshot %s: %q", uniqueSnapshotName, err) } + snapshotDataSource, snapStatus, err = vs.takeSnapshot(pv, tags) if err != nil || snapshotDataSource == nil { return fmt.Errorf("Failed to take snapshot of the volume %s: %q", pv.Name, err) } - glog.Infof("createSnapshot: create VolumeSnapshotData object for VolumeSnapshot %s.", snapshotName) - snapshotDataObj, err := vs.createVolumeSnapshotData(snapshotName, snapshot, snapshotDataSource, snapStatus) + glog.Infof("createSnapshot: create VolumeSnapshotData object for VolumeSnapshot %s.", uniqueSnapshotName) + snapshotDataObj, err := vs.createVolumeSnapshotData(uniqueSnapshotName, pv.Name, snapshotDataSource, snapStatus) if err != nil { return err } - glog.Infof("createSnapshot: Update VolumeSnapshot status and bind VolumeSnapshotData to VolumeSnapshot %s.", snapshotName) - _, err = vs.UpdateVolumeSnapshot(snapshotName, snapStatus) + glog.Infof("createSnapshot: Update VolumeSnapshot status and bind VolumeSnapshotData to VolumeSnapshot %s.", uniqueSnapshotName) + snapshotObj, err := vs.bindandUpdateVolumeSnapshot(uniqueSnapshotName, snapshotDataObj.Metadata.Name, snapStatus) if err != nil { - glog.Errorf("createSnapshot: Error updating volume snapshot %s: %v", snapshotName, err) - return fmt.Errorf("Failed to update VolumeSnapshot for snapshot %s", snapshotName) + glog.Errorf("createSnapshot: Error updating volume snapshot %s: %v", uniqueSnapshotName, err) + return fmt.Errorf("Failed to update VolumeSnapshot for snapshot %s", uniqueSnapshotName) } // Waiting for snapshot to be ready - err = vs.waitForSnapshot(snapshotName, snapshot, snapshotDataObj) + err = vs.waitForSnapshot(uniqueSnapshotName, snapshotObj, snapshotDataObj) if err != nil { - return fmt.Errorf("Failed to create snapshot %s", snapshotName) + return fmt.Errorf("Failed to create snapshot %s with error %v", uniqueSnapshotName, err) } - glog.Infof("createSnapshot: Snapshot %s created successfully.", snapshotName) + glog.Infof("createSnapshot: Snapshot %s created successfully.", uniqueSnapshotName) return nil } -func (vs *volumeSnapshotter) createVolumeSnapshotData(snapshotName string, snapshot *crdv1.VolumeSnapshot, snapshotDataSource *crdv1.VolumeSnapshotDataSource, snapStatus *[]crdv1.VolumeSnapshotCondition) (*crdv1.VolumeSnapshotData, error) { - conditions := *snapStatus - ind := len(conditions) - 1 - glog.Infof("createVolumeSnapshotData: Snapshot %s. 
Conditions: %#v", snapshotName, conditions) - readyCondition := crdv1.VolumeSnapshotDataCondition{ - Type: (crdv1.VolumeSnapshotDataConditionType)(conditions[ind].Type), - Status: conditions[ind].Status, - Message: conditions[ind].Message, - } - snapName := "k8s-volume-snapshot-" + string(uuid.NewUUID()) - - pv, err := vs.getPVFromVolumeSnapshot(snapshotName, snapshot) - if err != nil { - return nil, err +func (vs *volumeSnapshotter) createVolumeSnapshotData(uniqueSnapshotName, pvName string, + snapshotDataSource *crdv1.VolumeSnapshotDataSource, snapStatus *[]crdv1.VolumeSnapshotCondition) (*crdv1.VolumeSnapshotData, error) { + + glog.Infof("createVolumeSnapshotData: Snapshot %s. Conditions: %#v", uniqueSnapshotName, snapStatus) + var lastCondition crdv1.VolumeSnapshotDataCondition + if snapStatus != nil && len(*snapStatus) > 0 { + conditions := *snapStatus + ind := len(conditions) - 1 + lastCondition = crdv1.VolumeSnapshotDataCondition{ + Type: (crdv1.VolumeSnapshotDataConditionType)(conditions[ind].Type), + Status: conditions[ind].Status, + Message: conditions[ind].Message, + } } - pvName := pv.Name - + // Generate snapshotData name with the UID of snapshot object + snapDataName := fmt.Sprintf("%s-%s", snapshotDataNamePrefix, uuid.NewUUID()) snapshotData := &crdv1.VolumeSnapshotData{ Metadata: metav1.ObjectMeta{ - Name: snapName, + Name: snapDataName, }, Spec: crdv1.VolumeSnapshotDataSpec{ VolumeSnapshotRef: &v1.ObjectReference{ Kind: "VolumeSnapshot", - Name: snapshotName, + Name: uniqueSnapshotName, }, PersistentVolumeRef: &v1.ObjectReference{ Kind: "PersistentVolume", @@ -547,20 +550,21 @@ func (vs *volumeSnapshotter) createVolumeSnapshotData(snapshotName string, snaps }, Status: crdv1.VolumeSnapshotDataStatus{ Conditions: []crdv1.VolumeSnapshotDataCondition{ - readyCondition, + lastCondition, }, }, } + // TODO: Do we need to try to update VolumeSnapshotData object multiple times until it succeed? + // For all other updates, we only try it once. backoff := wait.Backoff{ Duration: volumeSnapshotInitialDelay, Factor: volumeSnapshotFactor, Steps: volumeSnapshotSteps, } var result crdv1.VolumeSnapshotData - err = wait.ExponentialBackoff(backoff, func() (bool, error) { - err = vs.restClient.Post(). + err := wait.ExponentialBackoff(backoff, func() (bool, error) { + err := vs.restClient.Post(). Resource(crdv1.VolumeSnapshotDataResourcePlural). - Namespace(v1.NamespaceDefault). Body(snapshotData). Do().Into(&result) if err != nil { @@ -571,13 +575,13 @@ func (vs *volumeSnapshotter) createVolumeSnapshotData(snapshotName string, snaps }) if err != nil { - glog.Errorf("createVolumeSnapshotData: Error creating the VolumeSnapshotData %s: %v", snapshotName, err) - return nil, fmt.Errorf("Failed to create the VolumeSnapshotData %s for snapshot %s", snapName, snapshotName) + glog.Errorf("createVolumeSnapshotData: Error creating the VolumeSnapshotData %s: %v", uniqueSnapshotName, err) + return nil, fmt.Errorf("Failed to create the VolumeSnapshotData %s for snapshot %s", snapDataName, uniqueSnapshotName) } return &result, nil } -func (vs *volumeSnapshotter) getSnapshotDeleteFunc(snapshotName string, snapshot *crdv1.VolumeSnapshot) func() error { +func (vs *volumeSnapshotter) getSnapshotDeleteFunc(uniqueSnapshotName string, snapshot *crdv1.VolumeSnapshot) func() error { // Delete a snapshot // 1. 
Find the SnapshotData corresponding to Snapshot // 1a: Not found => finish (it's been deleted already) @@ -588,14 +592,14 @@ func (vs *volumeSnapshotter) getSnapshotDeleteFunc(snapshotName string, snapshot return func() error { // TODO: get VolumeSnapshotDataSource from associated VolumeSnapshotData // then call volume delete snapshot method to delete the ot - snapshotDataObj := vs.getSnapshotDataFromSnapshotName(snapshotName) - if snapshotDataObj == nil { - return fmt.Errorf("Error getting VolumeSnapshotData for VolumeSnapshot %s", snapshotName) + snapshotDataObj, err := vs.getSnapshotDataFromSnapshot(snapshot) + if err != nil { + return fmt.Errorf("Error getting VolumeSnapshotData for VolumeSnapshot %s with error %v", uniqueSnapshotName, err) } - err := vs.deleteSnapshot(&snapshotDataObj.Spec) + err = vs.deleteSnapshot(&snapshotDataObj.Spec) if err != nil { - return fmt.Errorf("Failed to delete snapshot %s: %q", snapshotName, err) + return fmt.Errorf("Failed to delete snapshot %s: %q", uniqueSnapshotName, err) } snapshotDataName := snapshotDataObj.Metadata.Name @@ -603,19 +607,18 @@ func (vs *volumeSnapshotter) getSnapshotDeleteFunc(snapshotName string, snapshot err = vs.restClient.Delete(). Name(snapshotDataName). Resource(crdv1.VolumeSnapshotDataResourcePlural). - Namespace(v1.NamespaceDefault). Do().Into(&result) if err != nil { return fmt.Errorf("Failed to delete VolumeSnapshotData %s from API server: %q", snapshotDataName, err) } - vs.actualStateOfWorld.DeleteSnapshot(snapshotName) + vs.actualStateOfWorld.DeleteSnapshot(uniqueSnapshotName) return nil } } -func (vs *volumeSnapshotter) getSnapshotPromoteFunc(snapshotName string, snapshot *crdv1.VolumeSnapshot) func() error { +func (vs *volumeSnapshotter) getSnapshotPromoteFunc(uniqueSnapshotName string, snapshot *crdv1.VolumeSnapshot) func() error { // Promote snapshot to a PVC // 1. We have a PVC referencing a Snapshot object // 2. 
Find the SnapshotData corresponding to tha Snapshot @@ -647,7 +650,7 @@ func (vs *volumeSnapshotter) CreateVolumeSnapshot(snapshot *crdv1.VolumeSnapshot func (vs *volumeSnapshotter) DeleteVolumeSnapshot(snapshot *crdv1.VolumeSnapshot) { snapshotName := cache.MakeSnapshotName(snapshot.Metadata.Namespace, snapshot.Metadata.Name) operationName := snapshotOpDeletePrefix + snapshotName + snapshot.Spec.PersistentVolumeClaimName - glog.Infof("Snapshotter is about to create volume snapshot operation named %s", operationName) + glog.V(4).Infof("Snapshotter is about to delete volume snapshot operation named %s", operationName) err := vs.runningOperation.Run(operationName, vs.getSnapshotDeleteFunc(snapshotName, snapshot)) @@ -682,7 +685,8 @@ func (vs *volumeSnapshotter) PromoteVolumeSnapshotToPV(snapshot *crdv1.VolumeSna } } -func (vs *volumeSnapshotter) updateVolumeSnapshotMetadata(snapshot *crdv1.VolumeSnapshot) (*map[string]string, error) { +// Update VolumeSnapshot object with current timestamp and associated PersistentVolume name in object's metadata +func (vs *volumeSnapshotter) updateVolumeSnapshotMetadata(snapshot *crdv1.VolumeSnapshot, pvName string) (*map[string]string, error) { glog.Infof("In updateVolumeSnapshotMetadata") var snapshotObj crdv1.VolumeSnapshot // Need to get a fresh copy of the VolumeSnapshot from the API server @@ -703,10 +707,13 @@ func (vs *volumeSnapshotter) updateVolumeSnapshotMetadata(snapshot *crdv1.Volume } tags := make(map[string]string) - tags["Timestamp"] = fmt.Sprintf("%d", time.Now().UnixNano()) + tags[snapshotMetadataTimeStamp] = fmt.Sprintf("%d", time.Now().UnixNano()) + tags[snapshotMetadataPVName] = pvName snapshotCopy.Metadata.Labels = tags - glog.Infof("updateVolumeSnapshotMetadata: Metadata UID: %s Metadata Name: %s Metadata Namespace: %s Setting tags in Metadata Labels: %#v.", snapshotCopy.Metadata.UID, snapshotCopy.Metadata.Name, snapshotCopy.Metadata.Namespace, snapshotCopy.Metadata.Labels) + glog.Infof("updateVolumeSnapshotMetadata: Metadata UID: %s Metadata Name: %s Metadata Namespace: %s Setting tags in Metadata Labels: %#v.", + snapshotCopy.Metadata.UID, snapshotCopy.Metadata.Name, snapshotCopy.Metadata.Namespace, snapshotCopy.Metadata.Labels) + // TODO: Use Patch instead of Put to update the object? var result crdv1.VolumeSnapshot err = vs.restClient.Put(). Name(snapshot.Metadata.Name). @@ -722,80 +729,72 @@ func (vs *volumeSnapshotter) updateVolumeSnapshotMetadata(snapshot *crdv1.Volume cloudTags[CloudSnapshotCreatedForVolumeSnapshotNamespaceTag] = result.Metadata.Namespace cloudTags[CloudSnapshotCreatedForVolumeSnapshotNameTag] = result.Metadata.Name cloudTags[CloudSnapshotCreatedForVolumeSnapshotUIDTag] = fmt.Sprintf("%v", result.Metadata.UID) - cloudTags[CloudSnapshotCreatedForVolumeSnapshotTimestampTag] = result.Metadata.Labels["Timestamp"] + cloudTags[CloudSnapshotCreatedForVolumeSnapshotTimestampTag] = result.Metadata.Labels[snapshotMetadataTimeStamp] glog.Infof("updateVolumeSnapshotMetadata: returning cloudTags [%#v]", cloudTags) return &cloudTags, nil } -func (vs *volumeSnapshotter) UpdateVolumeSnapshot(snapshotName string, status *[]crdv1.VolumeSnapshotCondition) (*crdv1.VolumeSnapshot, error) { +// Update VolumeSnapshot status if the condition is changed. 
+func (vs *volumeSnapshotter) UpdateVolumeSnapshotStatus(snapshot *crdv1.VolumeSnapshot, condition *crdv1.VolumeSnapshotCondition) (*crdv1.VolumeSnapshot, error) { var snapshotObj crdv1.VolumeSnapshot - glog.Infof("In UpdateVolumeSnapshot") - // Get a fresh copy of the VolumeSnapshot from the API server - snapNameSpace, snapName, err := cache.GetNameAndNameSpaceFromSnapshotName(snapshotName) - if err != nil { - return nil, fmt.Errorf("Error getting namespace and name from VolumeSnapshot name %s: %v", snapshotName, err) - } - glog.Infof("UpdateVolumeSnapshot: Namespace %s Name %s", snapNameSpace, snapName) - err = vs.restClient.Get(). - Name(snapName). + err := vs.restClient.Get(). + Name(snapshot.Metadata.Name). Resource(crdv1.VolumeSnapshotResourcePlural). - Namespace(snapNameSpace). + Namespace(snapshot.Metadata.Namespace). Do().Into(&snapshotObj) - - objCopy, err := vs.scheme.DeepCopy(&snapshotObj) if err != nil { - return nil, fmt.Errorf("Error copying snapshot object %s object from API server: %v", snapshotName, err) - } - snapshotCopy, ok := objCopy.(*crdv1.VolumeSnapshot) - if !ok { - return nil, fmt.Errorf("Error: expecting type VolumeSnapshot but received type %T", objCopy) + return nil, err } + objCopy, err := vs.scheme.DeepCopy(&snapshotObj.Status) + oldStatus := objCopy.(*crdv1.VolumeSnapshotStatus) - snapshotDataObj := vs.getSnapshotDataFromSnapshotName(snapshotName) - if snapshotDataObj == nil { - return nil, fmt.Errorf("Error: VolumeSnapshotData not found for snapshot %s", snapName) - } - glog.Infof("UpdateVolumeSnapshot: Setting VolumeSnapshotData name %s in VolumeSnapshot object %s", snapshotDataObj.Metadata.Name, snapName) - snapshotCopy.Spec.SnapshotDataName = snapshotDataObj.Metadata.Name - - if status != nil && len(*status) > 0 { - glog.Infof("UpdateVolumeSnapshot: Setting status in VolumeSnapshot object.") - // Add the new condition to existing ones if it has a different type or - // update an existing condition of the same type. - ind := len(*status) - 1 - ind2 := len(snapshotCopy.Status.Conditions) - if ind2 < 1 || snapshotCopy.Status.Conditions[ind2-1].Type != (*status)[ind].Type { - snapshotCopy.Status.Conditions = append(snapshotCopy.Status.Conditions, (*status)[ind]) - } else if snapshotCopy.Status.Conditions[ind2-1].Type == (*status)[ind].Type { - snapshotCopy.Status.Conditions[ind2-1] = (*status)[ind] + status := snapshotObj.Status + isEqual := false + if oldStatus.Conditions == nil || len(oldStatus.Conditions) == 0 || condition.Type != oldStatus.Conditions[len(oldStatus.Conditions)-1].Type { + status.Conditions = append(status.Conditions, *condition) + } else { + oldCondition := oldStatus.Conditions[len(oldStatus.Conditions)-1] + if condition.Status == oldCondition.Status { + condition.LastTransitionTime = oldCondition.LastTransitionTime } - } - glog.Infof("Updating VolumeSnapshot object [%#v]", snapshotCopy) - // TODO: Make diff of the two objects and then use restClient.Patch to update it - var result crdv1.VolumeSnapshot - err = vs.restClient.Put(). - Name(snapName). - Resource(crdv1.VolumeSnapshotResourcePlural). - Namespace(snapNameSpace). - Body(snapshotCopy). 
- Do().Into(&result) - if err != nil { - return nil, fmt.Errorf("Error updating snapshot object %s on the API server: %v", snapshotName, err) + status.Conditions[len(status.Conditions)-1] = *condition + isEqual = condition.Type == oldCondition.Type && + condition.Status == oldCondition.Status && + condition.Reason == oldCondition.Reason && + condition.Message == oldCondition.Message && + condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime) + } + + if !isEqual { + var newSnapshotObj crdv1.VolumeSnapshot + snapshotObj.Status = status + err = vs.restClient.Put(). + Name(snapshot.Metadata.Name). + Resource(crdv1.VolumeSnapshotResourcePlural). + Namespace(snapshot.Metadata.Namespace). + Body(&snapshotObj). + Do().Into(&newSnapshotObj) + if err != nil { + return nil, err + } + glog.Infof("UpdateVolumeSnapshotStatus finishes %+v", newSnapshotObj) + return &newSnapshotObj, nil } - return &result, nil + return snapshot, nil } -func (vs *volumeSnapshotter) bindVolumeSnapshotDataToVolumeSnapshot(snapshotName string, snapshotDataName string) error { +// Bind the VolumeSnapshot and VolumeSnapshotData and udpate the status +func (vs *volumeSnapshotter) bindandUpdateVolumeSnapshot(uniqueSnapshotName string, snapshotDataName string, status *[]crdv1.VolumeSnapshotCondition) (*crdv1.VolumeSnapshot, error) { var snapshotObj crdv1.VolumeSnapshot glog.Infof("In bindVolumeSnapshotDataToVolumeSnapshot") // Get a fresh copy of the VolumeSnapshot from the API server - snapNameSpace, snapName, err := cache.GetNameAndNameSpaceFromSnapshotName(snapshotName) + snapNameSpace, snapName, err := cache.GetNameAndNameSpaceFromSnapshotName(uniqueSnapshotName) if err != nil { - return fmt.Errorf("Error getting namespace and name from VolumeSnapshot name %s: %v", snapshotName, err) + return nil, fmt.Errorf("Error getting namespace and name from VolumeSnapshot name %s: %v", uniqueSnapshotName, err) } glog.Infof("bindVolumeSnapshotDataToVolumeSnapshot: Namespace %s Name %s", snapNameSpace, snapName) err = vs.restClient.Get(). @@ -804,16 +803,20 @@ func (vs *volumeSnapshotter) bindVolumeSnapshotDataToVolumeSnapshot(snapshotName Namespace(snapNameSpace). Do().Into(&snapshotObj) + // TODO: Is copy needed here? objCopy, err := vs.scheme.DeepCopy(&snapshotObj) if err != nil { - return fmt.Errorf("Error copying snapshot object %s object from API server: %v", snapshotName, err) + return nil, fmt.Errorf("Error copying snapshot object %s object from API server: %v", uniqueSnapshotName, err) } snapshotCopy, ok := objCopy.(*crdv1.VolumeSnapshot) if !ok { - return fmt.Errorf("Error: expecting type VolumeSnapshot but received type %T", objCopy) + return nil, fmt.Errorf("Error: expecting type VolumeSnapshot but received type %T", objCopy) } snapshotCopy.Spec.SnapshotDataName = snapshotDataName + if status != nil { + snapshotCopy.Status.Conditions = *status + } glog.Infof("bindVolumeSnapshotDataToVolumeSnapshot: Updating VolumeSnapshot object [%#v]", snapshotCopy) // TODO: Make diff of the two objects and then use restClient.Patch to update it var result crdv1.VolumeSnapshot @@ -824,51 +827,8 @@ func (vs *volumeSnapshotter) bindVolumeSnapshotDataToVolumeSnapshot(snapshotName Body(snapshotCopy). 
Do().Into(&result) if err != nil { - return fmt.Errorf("Error updating snapshot object %s on the API server: %v", snapshotName, err) - } - - return nil -} - -func (vs *volumeSnapshotter) UpdateVolumeSnapshotData(snapshotDataName string, status *[]crdv1.VolumeSnapshotDataCondition) error { - var snapshotDataObj crdv1.VolumeSnapshotData - - glog.Infof("In UpdateVolumeSnapshotData %s", snapshotDataName) - err := vs.restClient.Get(). - Name(snapshotDataName). - Resource(crdv1.VolumeSnapshotDataResourcePlural). - Namespace(v1.NamespaceDefault). - Do().Into(&snapshotDataObj) - - objCopy, err := vs.scheme.DeepCopy(&snapshotDataObj) - if err != nil { - return fmt.Errorf("Error copying snapshot data object %s object from API server: %v", snapshotDataName, err) - } - snapshotDataCopy, ok := objCopy.(*crdv1.VolumeSnapshotData) - if !ok { - return fmt.Errorf("Error: expecting type VolumeSnapshotData but received type %T", objCopy) + return nil, fmt.Errorf("Error updating snapshot object %s on the API server: %v", uniqueSnapshotName, err) } - // Add the new condition to existing ones if it has a different type or - // update an existing condition of the same type. - ind := len(*status) - 1 - ind2 := len(snapshotDataCopy.Status.Conditions) - if ind2 < 1 || snapshotDataCopy.Status.Conditions[ind2-1].Type != (*status)[ind].Type { - snapshotDataCopy.Status.Conditions = append(snapshotDataCopy.Status.Conditions, (*status)[ind]) - } else if snapshotDataCopy.Status.Conditions[ind2-1].Type == (*status)[ind].Type { - snapshotDataCopy.Status.Conditions[ind2-1] = (*status)[ind] - } - glog.Infof("Updating VolumeSnapshotData object. Conditions: [%v]", snapshotDataCopy.Status.Conditions) - // TODO: Make diff of the two objects and then use restClient.Patch to update it - var result crdv1.VolumeSnapshotData - err = vs.restClient.Put(). - Name(snapshotDataName). - Resource(crdv1.VolumeSnapshotDataResourcePlural). - Namespace(v1.NamespaceDefault). - Body(snapshotDataCopy). 
- Do().Into(&result) - if err != nil { - return fmt.Errorf("Error updating snapshotdata object %s on the API server: %v", snapshotDataName, err) - } - return nil + return &result, nil } diff --git a/snapshot/pkg/controller/snapshotter/snapshotter_test.go b/snapshot/pkg/controller/snapshotter/snapshotter_test.go index a509d5a08cb..bd109341da6 100644 --- a/snapshot/pkg/controller/snapshotter/snapshotter_test.go +++ b/snapshot/pkg/controller/snapshotter/snapshotter_test.go @@ -429,7 +429,7 @@ func Test_createSnapshotData(t *testing.T) { Type: crdv1.VolumeSnapshotConditionReady, }, } - retData, err := vs.createVolumeSnapshotData("default/new-snapshot-test-1", fakeNewVolumeSnapshot(), &snapDataSource, &snapConditions) + retData, err := vs.createVolumeSnapshotData("default/new-snapshot-test-1", "fake-pv-1", &snapDataSource, &snapConditions) if err != nil { t.Errorf("Test failed, unexpected error: %v", err) } diff --git a/snapshot/pkg/volume/awsebs/processor.go b/snapshot/pkg/volume/awsebs/processor.go index f968246acbe..6b20af674f1 100644 --- a/snapshot/pkg/volume/awsebs/processor.go +++ b/snapshot/pkg/volume/awsebs/processor.go @@ -214,7 +214,7 @@ func convertAWSStatus(status string) *[]crdv1.VolumeSnapshotCondition { { Type: crdv1.VolumeSnapshotConditionReady, Status: v1.ConditionTrue, - Message: "Snapshot created succsessfully and it is ready", + Message: "Snapshot created successfully and it is ready", LastTransitionTime: metav1.Now(), }, } diff --git a/snapshot/pkg/volume/cinder/processor.go b/snapshot/pkg/volume/cinder/processor.go index 5188b34e951..a4832070fd1 100644 --- a/snapshot/pkg/volume/cinder/processor.go +++ b/snapshot/pkg/volume/cinder/processor.go @@ -172,7 +172,7 @@ func (c *cinderPlugin) convertSnapshotStatus(status string) *[]crdv1.VolumeSnaps { Type: crdv1.VolumeSnapshotConditionReady, Status: v1.ConditionTrue, - Message: "Snapshot created succsessfully and it is ready", + Message: "Snapshot created successfully and it is ready", LastTransitionTime: metav1.Now(), }, } diff --git a/snapshot/pkg/volume/gcepd/processor.go b/snapshot/pkg/volume/gcepd/processor.go index 88dc07ff0e3..2894231d744 100644 --- a/snapshot/pkg/volume/gcepd/processor.go +++ b/snapshot/pkg/volume/gcepd/processor.go @@ -27,6 +27,7 @@ import ( "github.com/kubernetes-incubator/external-storage/snapshot/pkg/cloudprovider/providers/gce" "github.com/kubernetes-incubator/external-storage/snapshot/pkg/volume" "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/kubelet/apis" k8svol "k8s.io/kubernetes/pkg/volume" ) @@ -63,22 +64,24 @@ func (plugin *gcePersistentDiskPlugin) SnapshotCreate(pv *v1.PersistentVolume, t diskName := spec.GCEPersistentDisk.PDName zone := pv.Labels[apis.LabelZoneFailureDomain] snapshotName := createSnapshotName(string(pv.Name)) - glog.Infof("Jing snapshotName %s", snapshotName) - // Gather provisioning options - //tags := make(map[string]string) - //tags["kubernetes.io/created-for/snapshot/namespace"] = claim.Namespace - //tags[CloudVolumeCreatedForClaimNameTag] = claim.Name - //tags[CloudVolumeCreatedForVolumeNameTag] = pvName err := plugin.cloud.CreateSnapshot(diskName, zone, snapshotName, *tags) if err != nil { return nil, nil, err } + initConditions := []crdv1.VolumeSnapshotCondition{ + { + Type: crdv1.VolumeSnapshotConditionPending, + Status: v1.ConditionUnknown, + Message: "Snapshot creation is triggered", + LastTransitionTime: metav1.Now(), + }, + } return &crdv1.VolumeSnapshotDataSource{ GCEPersistentDiskSnapshot: 
&crdv1.GCEPersistentDiskSnapshotSource{ SnapshotName: snapshotName, }, - }, nil, nil + }, &initConditions, nil } func (plugin *gcePersistentDiskPlugin) SnapshotRestore(snapshotData *crdv1.VolumeSnapshotData, pvc *v1.PersistentVolumeClaim, pvName string, parameters map[string]string) (*v1.PersistentVolumeSource, map[string]string, error) { @@ -88,7 +91,7 @@ func (plugin *gcePersistentDiskPlugin) SnapshotRestore(snapshotData *crdv1.Volum return nil, nil, fmt.Errorf("failed to retrieve Snapshot spec") } if pvc == nil { - return nil, nil, fmt.Errorf("nil pvc") + return nil, nil, fmt.Errorf("pvc is nil") } snapID := snapshotData.Spec.GCEPersistentDiskSnapshot.SnapshotName @@ -124,7 +127,8 @@ func (plugin *gcePersistentDiskPlugin) SnapshotRestore(snapshotData *crdv1.Volum } zone = k8svol.ChooseZoneForVolume(zones, pvc.Name) } - + tags["source"] = k8svol.GenerateVolumeName("Created from snapshot "+snapID+" ", pvName, 255) + glog.Infof("Provisioning disk %s from snapshot %s, zone %s requestGB %d tags %v", diskName, snapID, zone, requestGB, tags) err = plugin.cloud.CreateDiskFromSnapshot(snapID, diskName, diskType, zone, requestGB, tags) if err != nil { glog.Infof("Error creating GCE PD volume: %v", err) @@ -173,9 +177,8 @@ func (plugin *gcePersistentDiskPlugin) DescribeSnapshot(snapshotData *crdv1.Volu return nil, false, fmt.Errorf("invalid VolumeSnapshotDataSource: %v", snapshotData) } snapshotID := snapshotData.Spec.GCEPersistentDiskSnapshot.SnapshotName - _, isCompleted, err = plugin.cloud.DescribeSnapshot(snapshotID) - // TODO: Convert GCE status to []crdv1.VolumeSnapshotCondition - return nil, isCompleted, err + status, isCompleted, err := plugin.cloud.DescribeSnapshot(snapshotID) + return convertGCEStatus(status), isCompleted, err } // FindSnapshot finds a VolumeSnapshot by matching metadata @@ -197,3 +200,58 @@ func (plugin *gcePersistentDiskPlugin) VolumeDelete(pv *v1.PersistentVolume) err diskName := pv.Spec.GCEPersistentDisk.PDName return plugin.cloud.DeleteDisk(diskName) } + +func convertGCEStatus(status string) *[]crdv1.VolumeSnapshotCondition { + var snapConditions []crdv1.VolumeSnapshotCondition + + switch status { + case "READY": + snapConditions = []crdv1.VolumeSnapshotCondition{ + { + Type: crdv1.VolumeSnapshotConditionReady, + Status: v1.ConditionTrue, + Message: "Snapshot created successfully and it is ready", + LastTransitionTime: metav1.Now(), + }, + } + case "FAILED": + snapConditions = []crdv1.VolumeSnapshotCondition{ + { + Type: crdv1.VolumeSnapshotConditionError, + Status: v1.ConditionTrue, + Message: "Snapshot creation failed", + LastTransitionTime: metav1.Now(), + }, + } + case "UPLOADING": + snapConditions = []crdv1.VolumeSnapshotCondition{ + { + Type: crdv1.VolumeSnapshotConditionPending, + Status: v1.ConditionTrue, + Message: "Snapshot is uploading", + LastTransitionTime: metav1.Now(), + }, + } + case "CREATING": + snapConditions = []crdv1.VolumeSnapshotCondition{ + { + Type: crdv1.VolumeSnapshotConditionPending, + Status: v1.ConditionUnknown, + Message: "Snapshot is creating", + LastTransitionTime: metav1.Now(), + }, + } + case "Deleting": + snapConditions = []crdv1.VolumeSnapshotCondition{ + { + Type: crdv1.VolumeSnapshotConditionReady, + Status: v1.ConditionUnknown, + Message: "Snapshot is deleting", + LastTransitionTime: metav1.Now(), + }, + } + + } + return &snapConditions + +} From 74fd702c1deae0224e2a66e44b5ea8880b28c222 Mon Sep 17 00:00:00 2001 From: Tomas Smetana Date: Fri, 22 Sep 2017 17:27:57 +0200 Subject: [PATCH 30/68] Snapshot: Don't ignore API error 
when updating metadata --- snapshot/pkg/controller/snapshotter/snapshotter.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/snapshot/pkg/controller/snapshotter/snapshotter.go b/snapshot/pkg/controller/snapshotter/snapshotter.go index 400f3b2eb3e..918c9f08280 100644 --- a/snapshot/pkg/controller/snapshotter/snapshotter.go +++ b/snapshot/pkg/controller/snapshotter/snapshotter.go @@ -695,6 +695,9 @@ func (vs *volumeSnapshotter) updateVolumeSnapshotMetadata(snapshot *crdv1.Volume Resource(crdv1.VolumeSnapshotResourcePlural). Namespace(snapshot.Metadata.Namespace). Do().Into(&snapshotObj) + if err != nil { + return nil, fmt.Errorf("Error retrieving VolumeSnapshot %s from API server: %v", snapshot.Metadata.Name, err) + } // Copy the snapshot object before updating it objCopy, err := vs.scheme.DeepCopy(&snapshotObj) @@ -718,11 +721,11 @@ func (vs *volumeSnapshotter) updateVolumeSnapshotMetadata(snapshot *crdv1.Volume err = vs.restClient.Put(). Name(snapshot.Metadata.Name). Resource(crdv1.VolumeSnapshotResourcePlural). - Namespace(snapshotCopy.Metadata.Namespace). + Namespace(snapshot.Metadata.Namespace). Body(snapshotCopy). Do().Into(&result) if err != nil { - return nil, fmt.Errorf("Error updating snapshot object %s on the API server: %v", snapshot.Metadata.Name, err) + return nil, fmt.Errorf("Error updating snapshot object %s/%s on the API server: %v", snapshot.Metadata.Namespace, snapshot.Metadata.Name, err) } cloudTags := make(map[string]string) From 04d64584da09cc16ef0bd590775ef12415b663c9 Mon Sep 17 00:00:00 2001 From: Humble Chirammal Date: Wed, 11 Oct 2017 17:22:11 +0530 Subject: [PATCH 31/68] Specify UNIT when sending gluster-block command when operating on gluster-block mode. Signed-off-by: Humble Chirammal --- .../cmd/glusterblock-provisioner/glusterblock-provisioner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gluster/block/cmd/glusterblock-provisioner/glusterblock-provisioner.go b/gluster/block/cmd/glusterblock-provisioner/glusterblock-provisioner.go index 63c7e2ecda1..d2806154a29 100644 --- a/gluster/block/cmd/glusterblock-provisioner/glusterblock-provisioner.go +++ b/gluster/block/cmd/glusterblock-provisioner/glusterblock-provisioner.go @@ -361,7 +361,7 @@ func (p *glusterBlockProvisioner) createVolume(volSizeInt int, blockVol string, // Execute gluster-block command. 
cmd := exec.Command( config.opMode, "create", config.blockModeArgs["glustervol"]+"/"+blockVol, - "ha", haCountStr, config.blockModeArgs["hosts"], sizeStr, "--json") + "ha", haCountStr, config.blockModeArgs["hosts"], sizeStr+"GB", "--json") out, cmdErr := cmd.CombinedOutput() if cmdErr != nil { From c4425b6aae7d7283abf3fa00001c8e31657cd81d Mon Sep 17 00:00:00 2001 From: Tomas Smetana Date: Fri, 22 Sep 2017 17:32:07 +0200 Subject: [PATCH 32/68] Snapshot: add deployment, make container targets --- .gitignore | 2 + snapshot/Makefile | 35 ++++- snapshot/deploy/docker/controller/Dockerfile | 17 +++ snapshot/deploy/docker/provisioner/Dockerfile | 17 +++ .../deploy/kubernetes/aws/deployment.yaml | 47 ++++++ snapshot/deploy/kubernetes/aws/secret.yaml | 10 ++ .../deploy/kubernetes/aws/snapshot-rbac.yaml | 45 ++++++ .../kubernetes/hostpath/deployment.yaml | 37 +++++ .../kubernetes/hostpath/snapshot-rbac.yaml | 45 ++++++ .../test/hostpath_files/promote-claim.yaml | 2 +- snapshot/test/run_tests.py | 140 ++++++++++++------ 11 files changed, 351 insertions(+), 46 deletions(-) create mode 100644 snapshot/deploy/docker/controller/Dockerfile create mode 100644 snapshot/deploy/docker/provisioner/Dockerfile create mode 100644 snapshot/deploy/kubernetes/aws/deployment.yaml create mode 100644 snapshot/deploy/kubernetes/aws/secret.yaml create mode 100644 snapshot/deploy/kubernetes/aws/snapshot-rbac.yaml create mode 100644 snapshot/deploy/kubernetes/hostpath/deployment.yaml create mode 100644 snapshot/deploy/kubernetes/hostpath/snapshot-rbac.yaml diff --git a/.gitignore b/.gitignore index 715ceb6d9a0..910b8ead959 100644 --- a/.gitignore +++ b/.gitignore @@ -63,3 +63,5 @@ flycheck_*.el # snapshot binaries /snapshot/_output +/snapshot/deploy/docker/controller/snapshot-controller +/snapshot/deploy/docker/provisioner/snapshot-provisioner diff --git a/snapshot/Makefile b/snapshot/Makefile index a3c8a7eb53e..6739529e2f0 100644 --- a/snapshot/Makefile +++ b/snapshot/Makefile @@ -12,16 +12,47 @@ # See the License for the specific language governing permissions and # limitations under the License. -.PHONY: all controller clean test +ifeq ($(REGISTRY),) + REGISTRY = quay.io/external_storage/ +endif +ifeq ($(VERSION),) + VERSION = latest +endif +IMAGE_CONTROLLER = $(REGISTRY)snapshot-controller:$(VERSION) +MUTABLE_IMAGE_CONTROLLER = $(REGISTRY)snapshot-controller:latest +IMAGE_PROVISIONER = $(REGISTRY)snapshot-provisioner:$(VERSION) +MUTABLE_IMAGE_PROVISIONER = $(REGISTRY)snapshot-provisioner:latest -all: controller +.PHONY: all controller provisioner clean container container-quick push test + +all: controller provisioner controller: go build -o _output/bin/snapshot-controller cmd/snapshot-controller/snapshot-controller.go + +provisioner: go build -o _output/bin/snapshot-provisioner cmd/snapshot-pv-provisioner/snapshot-pv-provisioner.go clean: + -rm -f deploy/controller/snapshot-controller + -rm -f deploy/provisioner/snapshot-provisioner -rm -rf _output test: go test `go list ./... 
| grep -v 'vendor'` + +container: controller provisioner container-quick + +container-quick: + cp _output/bin/snapshot-controller deploy/docker/controller + cp _output/bin/snapshot-provisioner deploy/docker/provisioner + docker build -t $(MUTABLE_IMAGE_CONTROLLER) deploy/docker/controller + docker tag $(MUTABLE_IMAGE_CONTROLLER) $(IMAGE_CONTROLLER) + docker build -t $(MUTABLE_IMAGE_PROVISIONER) deploy/docker/provisioner + docker tag $(MUTABLE_IMAGE_PROVISIONER) $(IMAGE_PROVISIONER) + +push: container + docker push $(IMAGE_CONTROLLER) + docker push $(MUTABLE_IMAGE_CONTROLLER) + docker push $(IMAGE_PROVISIONER) + docker push $(MUTABLE_IMAGE_PROVISIONER) diff --git a/snapshot/deploy/docker/controller/Dockerfile b/snapshot/deploy/docker/controller/Dockerfile new file mode 100644 index 00000000000..d6542826a13 --- /dev/null +++ b/snapshot/deploy/docker/controller/Dockerfile @@ -0,0 +1,17 @@ +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM busybox:glibc +COPY snapshot-controller /bin/snapshot-controller +ENTRYPOINT ["/bin/snapshot-controller"] diff --git a/snapshot/deploy/docker/provisioner/Dockerfile b/snapshot/deploy/docker/provisioner/Dockerfile new file mode 100644 index 00000000000..e806e342bd9 --- /dev/null +++ b/snapshot/deploy/docker/provisioner/Dockerfile @@ -0,0 +1,17 @@ +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +FROM busybox:glibc +COPY snapshot-provisioner /bin/snapshot-provisioner +ENTRYPOINT ["/bin/snapshot-provisioner"] diff --git a/snapshot/deploy/kubernetes/aws/deployment.yaml b/snapshot/deploy/kubernetes/aws/deployment.yaml new file mode 100644 index 00000000000..e6fc47069c5 --- /dev/null +++ b/snapshot/deploy/kubernetes/aws/deployment.yaml @@ -0,0 +1,47 @@ +kind: Deployment +apiVersion: extensions/v1beta1 +metadata: + name: snapshot-controller +spec: + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: snapshot-controller + spec: + serviceAccountName: snapshot-controller-runner + containers: + - name: snapshot-controller + image: snapshot-controller + imagePullPolicy: "IfNotPresent" + args: + - "-cloudprovider aws" + env: + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: awskeys + key: access-key-id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: awskeys + key: secret-access-key + - name: snapshot-provisioner + image: snapshot-provisioner + imagePullPolicy: "IfNotPresent" + args: + - "-cloudprovider aws" + env: + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: awskeys + key: access-key-id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: awskeys + key: secret-access-key diff --git a/snapshot/deploy/kubernetes/aws/secret.yaml b/snapshot/deploy/kubernetes/aws/secret.yaml new file mode 100644 index 00000000000..38f525f88b3 --- /dev/null +++ b/snapshot/deploy/kubernetes/aws/secret.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: awskeys +type: Opaque +data: + access-key-id: # base64 encoded AWS_ACCESS_KEY_ID + secret-access-key: # base64 encoded AWS_SECRET_ACCESS_KEY + + diff --git a/snapshot/deploy/kubernetes/aws/snapshot-rbac.yaml b/snapshot/deploy/kubernetes/aws/snapshot-rbac.yaml new file mode 100644 index 00000000000..a77268cae14 --- /dev/null +++ b/snapshot/deploy/kubernetes/aws/snapshot-rbac.yaml @@ -0,0 +1,45 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: snapshot-controller-runner +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1alpha1 +metadata: + name: snapshot-controller-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete"] + - apiGroups: ["volume-snapshot-data.external-storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["volume-snapshot-data.external-storage.k8s.io"] + resources: ["volumesnapshotdatas"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: snapsot-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: snapshot-controller-role +subjects: +- kind: ServiceAccount + name: snapshot-controller-runner + namespace: default + diff --git a/snapshot/deploy/kubernetes/hostpath/deployment.yaml b/snapshot/deploy/kubernetes/hostpath/deployment.yaml new file mode 100644 index 00000000000..0050221f826 --- 
/dev/null +++ b/snapshot/deploy/kubernetes/hostpath/deployment.yaml @@ -0,0 +1,37 @@ +kind: Deployment +apiVersion: extensions/v1beta1 +metadata: + name: snapshot-controller +spec: + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: snapshot-controller + spec: + serviceAccountName: snapshot-controller-runner + containers: + - name: snapshot-controller + image: quay.io/external_storage/snapshot-controller + imagePullPolicy: "IfNotPresent" + volumeMounts: + - name: tmp + mountPath: /tmp + - name: hostpath-source + mountPath: /test + - name: snapshot-provisioner + image: quay.io/external_storage/snapshot-provisioner + imagePullPolicy: "IfNotPresent" + volumeMounts: + - name: tmp + mountPath: /tmp + - name: hostpath-source + mountPath: /test + volumes: + - name: tmp + hostPath: + path: /tmp + - name: hostpath-source + emptyDir: {} diff --git a/snapshot/deploy/kubernetes/hostpath/snapshot-rbac.yaml b/snapshot/deploy/kubernetes/hostpath/snapshot-rbac.yaml new file mode 100644 index 00000000000..0b4411ceaf9 --- /dev/null +++ b/snapshot/deploy/kubernetes/hostpath/snapshot-rbac.yaml @@ -0,0 +1,45 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: snapshot-controller-runner +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1alpha1 +metadata: + name: snapshot-controller-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete"] + - apiGroups: ["volumesnapshot.external-storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["volumesnapshot.external-storage.k8s.io"] + resources: ["volumesnapshotdatas"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: snapsot-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: snapshot-controller-role +subjects: +- kind: ServiceAccount + name: snapshot-controller-runner + namespace: default + diff --git a/snapshot/test/hostpath_files/promote-claim.yaml b/snapshot/test/hostpath_files/promote-claim.yaml index bc11ed750b5..2d103b18029 100644 --- a/snapshot/test/hostpath_files/promote-claim.yaml +++ b/snapshot/test/hostpath_files/promote-claim.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: name: snapshot-pv-provisioning-demo - namespace: myns + namespace: default annotations: snapshot.alpha.kubernetes.io/snapshot: snapshot-demo spec: diff --git a/snapshot/test/run_tests.py b/snapshot/test/run_tests.py index 9c5093fc202..486948464a8 100755 --- a/snapshot/test/run_tests.py +++ b/snapshot/test/run_tests.py @@ -21,6 +21,7 @@ import subprocess import time import atexit +import json def reap_process(proc): proc.terminate() @@ -33,14 +34,20 @@ def reap_process(proc): provlog = sys.stdout argparser = argparse.ArgumentParser(description='Volume Snapshotter test suite') - argparser.add_argument('-n', '--no-kubernetes', dest='nokube', + argparser.add_argument('-n', 
'--no-kubernetes', dest='nokube', action='store_true', help='don\'t start the kubernetes daemon') + argparser.add_argument('-t', '--tests-only', dest='testsonly', action='store_true', + help='don\'t start anything, just do the tests') argparser.add_argument('-k', '--kubelog', dest='kubelog', help='file to log kubernetes messaes to (default stdout)') argparser.add_argument('-c', '--ctrllog', dest='ctrllog', help='file to log controller messaes to (default stdout)') argparser.add_argument('-p', '--provlog', dest='provlog', help='file to log provisioner messaes to (default stdout)') + argparser.add_argument('-d', '--deployment', dest='deployment', + help='path of deployment yaml file; uses containers for test') + argparser.add_argument('-r', '--rbac', dest='rbac', + help='path of rbac definitions yaml file for deployment') argparser.add_argument('testname', nargs='*', help='name of test class or method (e. g. "Hostpath")') args = argparser.parse_args() @@ -50,50 +57,97 @@ def reap_process(proc): kubesrcdir = os.getenv('KUBESRCDIR', default=os.path.join(os.getenv('HOME'), 'kubernetes')) snaptestcase.kubesrcdir = kubesrcdir + kubectl = 'kubectl' + if not args.testsonly: + # find the kubernetes dir an start the cluster (if not disabled on commandline) + if not args.nokube: + if args.kubelog: + kubelog = open(args.kubelog, mode='w') + kubectl = os.path.join(kubesrcdir, "cluster/kubectl.sh") + kubecmd = os.path.join(kubesrcdir, 'hack/local-up-cluster.sh') + # start the cluster + kubeoutputdir = os.path.join(kubesrcdir, '_output/local/bin/linux/amd64') + print("Starting " + kubecmd) + kube = subprocess.Popen([kubecmd + ' -o ' + kubeoutputdir], + shell=True, stdout=kubelog, stderr=kubelog, cwd=kubesrcdir) + # give the cluster some time to initialize + check = subprocess.Popen([kubectl + ' get nodes'], shell=True) + check.wait() + check_count = 0 + while check.returncode != 0 and check_count < 30: + time.sleep(1) + check = subprocess.Popen([kubectl + ' get nodes'], shell=True) + check.wait() + check_count = check_count + 1 + nodes_num = 0 + check_count = 0 + while nodes_num == 0 and check_count < 30: + time.sleep(1) + check_count = check_count + 1 + check = subprocess.check_output([kubectl + ' get nodes -o json'], shell=True) + try: + node_list = json.loads(check.decode("utf-8")) + nodes_num = len(node_list['items']) + except Exception as e: + pass - # find the kubernetes dir an start the cluster (if not disabled on commandline) - if not args.nokube: - if args.kubelog: - kubelog = open(args.kubelog, mode='w') - kubecmd = os.path.join(kubesrcdir, 'hack/local-up-cluster.sh') - # start the cluster - kubeoutputdir = os.path.join(kubesrcdir, '_output/local/bin/linux/amd64') - print("Starting " + kubecmd) - kube = subprocess.Popen([kubecmd + ' -o ' + kubeoutputdir], - shell=True, stdout=kubelog, stderr=kubelog, cwd=kubesrcdir) - # give the cluster some time to initialize - time.sleep(20) - kube.poll() - if kube.returncode != None: - print("Fatal: Unable to start the kubernetes cluster", file=sys.stderr) - sys.exit(1) - atexit.register(reap_process, kube) + kube.poll() + if kube.returncode != None: + print("Fatal: Unable to start the kubernetes cluster", file=sys.stderr) + sys.exit(1) + atexit.register(reap_process, kube) + if args.deployment: + deployment_file = os.path.abspath(args.deployment) + rbac_file = os.path.abspath(args.rbac) + new_rbac = subprocess.Popen([kubectl + ' create -f ' + rbac_file], + shell=True, stdout=kubelog, stderr=kubelog) + new_rbac.wait() + if new_rbac.returncode != 0: + 
print("Error creating the snapshot deployment rbac", file=sys.stderr) + sys.exit(1) + new_depl = subprocess.Popen([kubectl + ' create -f ' + deployment_file], + shell=True, stdout=kubelog, stderr=kubelog) + new_depl.wait() + if new_depl.returncode != 0: + print("Error creating the snapshot deployment", file=sys.stderr) + sys.exit(1) + ready_replicas = 0 + check_count = 0 + while ready_replicas == 0 and check_count < 30: + time.sleep(1) + check_count = check_count + 1 + check = subprocess.check_output([kubectl + ' get deployments -o json'], shell=True) + try: + deployment_list = json.loads(check.decode("utf-8")) + if len(deployment_list['items']) > 0: + ready_replicas = deployment_list['items'][0]['status']['readyReplicas'] + except Exception as e: + pass - kubeconfig = os.getenv('KUBECONFIG', '/var/run/kubernetes/admin.kubeconfig') - # start the controller - if args.ctrllog: - ctrllog = open(args.ctrllog, mode='w') - ctrlcmd = os.path.join(snapsrcdir, '_output/bin/snapshot-controller') - ctrl = subprocess.Popen([ctrlcmd + ' -v 10 -alsologtostderr -kubeconfig ' + kubeconfig], - shell=True, stdout=ctrllog, stderr=ctrllog, cwd=snapsrcdir) - time.sleep(10) - ctrl.poll() - if ctrl.returncode != None: - print("Fatal: Unable to start the snapshot controller", file=sys.stderr) - sys.exit(1) - atexit.register(reap_process, ctrl) - # start the provisioner - if args.provlog: - ctrllog = open(args.provlog, mode='w') - provcmd = os.path.join(snapsrcdir, '_output/bin/snapshot-provisioner') - prov = subprocess.Popen([provcmd + ' -v 10 -alsologtostderr -kubeconfig ' + kubeconfig], - shell=True, stdout=provlog, stderr=provlog, cwd=snapsrcdir) - time.sleep(10) - prov.poll() - if prov.returncode != None: - print("Fatal: Unable to start the snapshot provisioner", file=sys.stderr) - sys.exit(1) - atexit.register(reap_process, prov) + else: + kubeconfig = os.getenv('KUBECONFIG', '/var/run/kubernetes/admin.kubeconfig') + # start the controller + if args.ctrllog: + ctrllog = open(args.ctrllog, mode='w') + ctrlcmd = os.path.join(snapsrcdir, '_output/bin/snapshot-controller') + ctrl = subprocess.Popen([ctrlcmd + ' -v 10 -alsologtostderr -kubeconfig ' + kubeconfig], + shell=True, stdout=ctrllog, stderr=ctrllog, cwd=snapsrcdir) + ctrl.wait() + if ctrl.returncode != None: + print("Fatal: Unable to start the snapshot controller", file=sys.stderr) + sys.exit(1) + atexit.register(reap_process, ctrl) + # start the provisioner + if args.provlog: + ctrllog = open(args.provlog, mode='w') + provcmd = os.path.join(snapsrcdir, '_output/bin/snapshot-provisioner') + prov = subprocess.Popen([provcmd + ' -v 10 -alsologtostderr -kubeconfig ' + kubeconfig], + shell=True, stdout=provlog, stderr=provlog, cwd=snapsrcdir) + prov.wait() + if prov.returncode != None: + print("Fatal: Unable to start the snapshot provisioner", file=sys.stderr) + sys.exit(1) + atexit.register(reap_process, prov) # Load all files in this directory whose name starts with 'test' if args.testname: From 8d8fce6d7e82e01b3e1d9f6e7fa8f002771e02c3 Mon Sep 17 00:00:00 2001 From: Ian Chakeres Date: Mon, 16 Oct 2017 13:09:10 -0700 Subject: [PATCH 33/68] Fixed openstack/standalone-cinder/pkg/provisioner/provisioner.go:L115 linter error impacting build --- openstack/standalone-cinder/pkg/provisioner/provisioner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openstack/standalone-cinder/pkg/provisioner/provisioner.go b/openstack/standalone-cinder/pkg/provisioner/provisioner.go index 4d2a5e13cda..1e910917184 100644 --- 
a/openstack/standalone-cinder/pkg/provisioner/provisioner.go +++ b/openstack/standalone-cinder/pkg/provisioner/provisioner.go @@ -112,7 +112,7 @@ func (p *cinderProvisioner) Provision(options controller.VolumeOptions) (*v1.Per mapper, err = p.mb.newVolumeMapperFromConnection(connection) if err != nil { - glog.Errorf("Unable to create volume mapper: %f", err) + glog.Errorf("Unable to create volume mapper: %v", err) goto ERROR_DISCONNECT } From be4fc0e4bd4e05da828bbf8ab37e90761efc80a4 Mon Sep 17 00:00:00 2001 From: dhirajh Date: Mon, 16 Oct 2017 11:51:01 -0700 Subject: [PATCH 34/68] Inherit node labels into local volume PVs Fix check-in error Refactor code so that unit tests can be written for labels that don't exist Code review comments addressed. Code review comments addressed --- local-volume/bootstrapper/README.md | 27 ++++++++++++ local-volume/provisioner/cmd/main.go | 5 ++- local-volume/provisioner/pkg/common/common.go | 17 +++++++- .../provisioner/pkg/discovery/discovery.go | 16 +++++++- .../pkg/discovery/discovery_test.go | 41 ++++++++++++++++--- 5 files changed, 96 insertions(+), 10 deletions(-) diff --git a/local-volume/bootstrapper/README.md b/local-volume/bootstrapper/README.md index eef7718f954..c1093f37476 100644 --- a/local-volume/bootstrapper/README.md +++ b/local-volume/bootstrapper/README.md @@ -63,6 +63,33 @@ data: An example of using the bootstraper is included [here](../README.md). +### PV Label Config (optional) + +One can (optionally) also configure the provisioner to copy certain labels from the node to the PVs of +the local volumes on that node. For example in the config below, the admin has chosen to copy the labels +failure-domain.beta.kubernetes.io/zone and failure-domain.beta.kubernetes.io/region from the node to the +PVs as those zone and region labels are quite applicable to the local volume PVs of that node. + +```yaml +kind: ConfigMap +metadata: + name: local-volume-config + namespace: kube-system +data: + nodeLabelsForPV: | + - failure-domain.beta.kubernetes.io/zone + - failure-domain.beta.kubernetes.io/region + storageClassMap: | + local-fast: + hostDir: "/mnt/ssds" + mountDir: "/local-ssds" + local-slow: + hostDir: "/mnt/hdds" + mountDir: "/local-hdds" + local-storage: + hostDir: "/mnt/disks" +``` + ### Command line options To see all options, compile bootstrapper and use `-h` option, below is a curated diff --git a/local-volume/provisioner/cmd/main.go b/local-volume/provisioner/cmd/main.go index 41b125f93f6..a32b9033d22 100644 --- a/local-volume/provisioner/cmd/main.go +++ b/local-volume/provisioner/cmd/main.go @@ -69,8 +69,9 @@ func main() { glog.Info("Starting controller\n") controller.StartLocalController(client, &common.UserConfig{ - Node: node, - DiscoveryMap: provisionerConfig.StorageClassConfig, + Node: node, + DiscoveryMap: provisionerConfig.StorageClassConfig, + NodeLabelsForPV: provisionerConfig.NodeLabelsForPV, }) } diff --git a/local-volume/provisioner/pkg/common/common.go b/local-volume/provisioner/pkg/common/common.go index 8f2e3409591..7ba94006f8c 100644 --- a/local-volume/provisioner/pkg/common/common.go +++ b/local-volume/provisioner/pkg/common/common.go @@ -58,6 +58,8 @@ const ( // ProvisonerStorageClassConfig defines file name of the file which stores storage class // configuration. The file name must match to the key name used in configuration map. 
ProvisonerStorageClassConfig = "storageClassMap" + // ProvisionerNodeLabelsForPV contains a list of node labels to be copied to the PVs created by the provisioner + ProvisionerNodeLabelsForPV = "nodeLabelsForPV" ) // UserConfig stores all the user-defined parameters to the provisioner @@ -66,6 +68,8 @@ type UserConfig struct { Node *v1.Node // key = storageclass, value = mount configuration for the storageclass DiscoveryMap map[string]MountConfig + // Labels and their values that are added to PVs created by the provisioner + NodeLabelsForPV []string } // MountConfig stores a configuration for discoverying a specific storageclass @@ -101,6 +105,7 @@ type LocalPVConfig struct { StorageClass string ProvisionerName string AffinityAnn string + Labels map[string]string } // ProvisionerConfiguration defines Provisioner configuration objects @@ -110,13 +115,16 @@ type LocalPVConfig struct { type ProvisionerConfiguration struct { // StorageClassConfig defines configuration of Provisioner's storage classes StorageClassConfig map[string]MountConfig `json:"storageClassMap" yaml:"storageClassMap"` + // NodeLabelsForPV contains a list of node labels to be copied to the PVs created by the provisioner + NodeLabelsForPV []string `json:"nodeLabelsForPV" yaml:"nodeLabelsForPV"` } // CreateLocalPVSpec returns a PV spec that can be used for PV creation func CreateLocalPVSpec(config *LocalPVConfig) *v1.PersistentVolume { return &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ - Name: config.Name, + Name: config.Name, + Labels: config.Labels, Annotations: map[string]string{ AnnProvisionedBy: config.ProvisionerName, v1.AlphaStorageNodeAffinityAnnotation: config.AffinityAnn, @@ -168,6 +176,13 @@ func VolumeConfigToConfigMapData(config *ProvisionerConfiguration) (map[string]s return nil, fmt.Errorf("unable to Marshal volume config: %v", err) } configMapData[ProvisonerStorageClassConfig] = string(val) + if len(config.NodeLabelsForPV) > 0 { + nodeLabels, err := yaml.Marshal(config.NodeLabelsForPV) + if err != nil { + return nil, fmt.Errorf("unable to Marshal node label: %v", err) + } + configMapData[ProvisionerNodeLabelsForPV] = string(nodeLabels) + } return configMapData, nil } diff --git a/local-volume/provisioner/pkg/discovery/discovery.go b/local-volume/provisioner/pkg/discovery/discovery.go index 53ecb7e468a..fb7ca3e12a2 100644 --- a/local-volume/provisioner/pkg/discovery/discovery.go +++ b/local-volume/provisioner/pkg/discovery/discovery.go @@ -32,6 +32,7 @@ import ( // It looks for volumes in the directories specified in the discoveryMap type Discoverer struct { *common.RuntimeConfig + Labels map[string]string nodeAffinityAnn string } @@ -47,7 +48,19 @@ func NewDiscoverer(config *common.RuntimeConfig) (*Discoverer, error) { if err != nil { return nil, fmt.Errorf("Failed to convert node affinity to alpha annotation: %v", err) } - return &Discoverer{RuntimeConfig: config, nodeAffinityAnn: tmpAnnotations[v1.AlphaStorageNodeAffinityAnnotation]}, nil + + labelMap := make(map[string]string) + for _, labelName := range config.NodeLabelsForPV { + labelVal, ok := config.Node.Labels[labelName] + if ok { + labelMap[labelName] = labelVal + } + } + + return &Discoverer{ + RuntimeConfig: config, + Labels: labelMap, + nodeAffinityAnn: tmpAnnotations[v1.AlphaStorageNodeAffinityAnnotation]}, nil } func generateNodeAffinity(node *v1.Node) (*v1.NodeAffinity, error) { @@ -168,6 +181,7 @@ func (d *Discoverer) createPV(file, class string, config common.MountConfig, cap StorageClass: class, ProvisionerName: d.Name, AffinityAnn: 
d.nodeAffinityAnn, + Labels: d.Labels, }) _, err := d.APIUtil.CreatePV(pvSpec) diff --git a/local-volume/provisioner/pkg/discovery/discovery_test.go b/local-volume/provisioner/pkg/discovery/discovery_test.go index 64f06138e0b..7895d395888 100644 --- a/local-volume/provisioner/pkg/discovery/discovery_test.go +++ b/local-volume/provisioner/pkg/discovery/discovery_test.go @@ -27,6 +27,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api/v1/helper" + "reflect" ) const ( @@ -36,12 +37,27 @@ const ( testProvisionerName = "test-provisioner" ) +var nodeLabels = map[string]string{ + "failure-domain.beta.kubernetes.io/zone": "west-1", + "failure-domain.beta.kubernetes.io/region": "west", + common.NodeLabelKey: testNodeName, + "label-that-pv-does-not-inherit": "foo"} + +var nodeLabelsForPV = []string{ + "failure-domain.beta.kubernetes.io/zone", + "failure-domain.beta.kubernetes.io/region", + common.NodeLabelKey, + "non-existent-label-that-pv-will-not-get"} + +var expectedPVLabels = map[string]string{ + "failure-domain.beta.kubernetes.io/zone": "west-1", + "failure-domain.beta.kubernetes.io/region": "west", + common.NodeLabelKey: testNodeName} + var testNode = &v1.Node{ ObjectMeta: metav1.ObjectMeta{ - Name: testNodeName, - Labels: map[string]string{ - common.NodeLabelKey: testNodeName, - }, + Name: testNodeName, + Labels: nodeLabels, }, } @@ -229,8 +245,9 @@ func testSetup(t *testing.T, test *testConfig) *Discoverer { test.apiUtil = util.NewFakeAPIUtil(test.apiShouldFail, test.cache) userConfig := &common.UserConfig{ - Node: testNode, - DiscoveryMap: scMapping, + Node: testNode, + DiscoveryMap: scMapping, + NodeLabelsForPV: nodeLabelsForPV, } runConfig := &common.RuntimeConfig{ UserConfig: userConfig, @@ -300,6 +317,17 @@ func verifyNodeAffinity(t *testing.T, pv *v1.PersistentVolume) { } } +func verifyPVLabels(t *testing.T, pv *v1.PersistentVolume) { + if len(pv.Labels) == 0 { + t.Errorf("Labels not set") + return + } + eq := reflect.DeepEqual(pv.Labels, expectedPVLabels) + if !eq { + t.Errorf("Labels not as expected %v != %v", pv.Labels, expectedPVLabels) + } +} + func verifyProvisionerName(t *testing.T, pv *v1.PersistentVolume) { if len(pv.Annotations) == 0 { t.Errorf("Annotations not set") @@ -377,6 +405,7 @@ func verifyCreatedPVs(t *testing.T, test *testConfig) { verifyProvisionerName(t, createdPV) verifyNodeAffinity(t, createdPV) + verifyPVLabels(t, createdPV) verifyCapacity(t, createdPV, expectedPV) // TODO: Verify volume type once that is supported in the API. } From 1018cfbec045196f20c619ddf5a75db13bb685d3 Mon Sep 17 00:00:00 2001 From: Ian Chakeres Date: Sat, 14 Oct 2017 05:48:22 -0700 Subject: [PATCH 35/68] Break out GetBlockCapacityByte for linux and unsupported platforms GetBlockCapacityByte for contains reference to a constant unix.BLKGETSIZE64 which is not defined for darwin. This PR breaks out the GetBlockCapacityByte method for linux and unsupported, platforms which enables make test and development activities on darwin (aka mac) to PASS. 
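The split described above follows the usual Go pattern of pairing a `// +build linux` file with a `// +build !linux` stub. A minimal sketch of the Linux side, using illustrative file and function names rather than the patch's own, looks like this:

```go
// volume_size_linux.go (illustrative name, not the patch's file)
// +build linux

package util

import (
	"os"
	"unsafe"

	"golang.org/x/sys/unix"
)

// blockDeviceSizeBytes asks the kernel for a block device's size via the
// BLKGETSIZE64 ioctl; that constant only exists in the Linux build of
// golang.org/x/sys/unix, which is why this file carries the linux build tag.
func blockDeviceSizeBytes(path string) (int64, error) {
	f, err := os.OpenFile(path, os.O_RDONLY, 0)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	var size int64
	if _, _, errno := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.BLKGETSIZE64, uintptr(unsafe.Pointer(&size))); errno != 0 {
		return 0, errno
	}
	return size, nil
}
```

The matching `// +build !linux` file defines the same function but simply returns an "unsupported on this platform" error, so `go build` and `go test` keep working on darwin.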
--- .../provisioner/pkg/util/volume_util.go | 20 --------- .../provisioner/pkg/util/volume_util_linux.go | 44 +++++++++++++++++++ .../pkg/util/volume_util_unsupported.go | 29 ++++++++++++ 3 files changed, 73 insertions(+), 20 deletions(-) create mode 100644 local-volume/provisioner/pkg/util/volume_util_linux.go create mode 100644 local-volume/provisioner/pkg/util/volume_util_unsupported.go diff --git a/local-volume/provisioner/pkg/util/volume_util.go b/local-volume/provisioner/pkg/util/volume_util.go index 0866e4906e6..15d501601b9 100644 --- a/local-volume/provisioner/pkg/util/volume_util.go +++ b/local-volume/provisioner/pkg/util/volume_util.go @@ -25,7 +25,6 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/volume/util" - "unsafe" ) // VolumeUtil is an interface for local filesystem operations @@ -131,25 +130,6 @@ func (u *volumeUtil) GetFsCapacityByte(fullPath string) (int64, error) { return capacity, err } -// GetBlockCapacityByte returns capacity in bytes of a block device. -// fullPath is the pathname of block device. -func (u *volumeUtil) GetBlockCapacityByte(fullPath string) (int64, error) { - file, err := os.OpenFile(fullPath, os.O_RDONLY, 0) - if err != nil { - return 0, err - } - defer file.Close() - - var size int64 - // Get size of block device into 64 bit int. - // Ref: http://www.microhowto.info/howto/get_the_size_of_a_linux_block_special_device_in_c.html - if _, _, err := unix.Syscall(unix.SYS_IOCTL, file.Fd(), unix.BLKGETSIZE64, uintptr(unsafe.Pointer(&size))); err != 0 { - return 0, err - } - - return size, err -} - var _ VolumeUtil = &FakeVolumeUtil{} // FakeVolumeUtil is a stub interface for unit testing diff --git a/local-volume/provisioner/pkg/util/volume_util_linux.go b/local-volume/provisioner/pkg/util/volume_util_linux.go new file mode 100644 index 00000000000..29748b6053b --- /dev/null +++ b/local-volume/provisioner/pkg/util/volume_util_linux.go @@ -0,0 +1,44 @@ +// +build linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "golang.org/x/sys/unix" + "os" + "unsafe" +) + +// GetBlockCapacityByte returns capacity in bytes of a block device. +// fullPath is the pathname of block device. +func (u *volumeUtil) GetBlockCapacityByte(fullPath string) (int64, error) { + file, err := os.OpenFile(fullPath, os.O_RDONLY, 0) + if err != nil { + return 0, err + } + defer file.Close() + + var size int64 + // Get size of block device into 64 bit int. 
+ // Ref: http://www.microhowto.info/howto/get_the_size_of_a_linux_block_special_device_in_c.html + if _, _, err := unix.Syscall(unix.SYS_IOCTL, file.Fd(), unix.BLKGETSIZE64, uintptr(unsafe.Pointer(&size))); err != 0 { + return 0, err + } + + return size, err +} diff --git a/local-volume/provisioner/pkg/util/volume_util_unsupported.go b/local-volume/provisioner/pkg/util/volume_util_unsupported.go new file mode 100644 index 00000000000..fe53fd99417 --- /dev/null +++ b/local-volume/provisioner/pkg/util/volume_util_unsupported.go @@ -0,0 +1,29 @@ +// +build !linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" +) + +// GetBlockCapacityByte is defined here for darwin and other platforms +// so that make test suceeds on them. +func (u *volumeUtil) GetBlockCapacityByte(fullPath string) (int64, error) { + return 0, fmt.Errorf("GetBlockCapacityByte is unsupported in this build") +} From 66806541ffd7c4f2c15bdd2e7269d14397fff78a Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Thu, 19 Oct 2017 23:20:59 +0800 Subject: [PATCH 36/68] Fix links of examples and add link of GCE PD examples --- snapshot/README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/snapshot/README.md b/snapshot/README.md index 7cb4c1375ce..341404505c7 100644 --- a/snapshot/README.md +++ b/snapshot/README.md @@ -11,10 +11,11 @@ Pre-alpha ## Quick Howto - - [Host Path](examples/hostpath/README.md) + - [Host Path](doc/examples/hostpath/README.md) - - [AWS EBS](examples/aws/README.md) + - [AWS EBS](doc/examples/aws/README.md) + - [GCE PD](doc/examples/gce/README.md) ## Snapshot Volume Plugin Interface From c81b564e211d36ca41eb4e860ff84059dd3e5ac5 Mon Sep 17 00:00:00 2001 From: Ian Chakeres Date: Sat, 14 Oct 2017 05:48:46 -0700 Subject: [PATCH 37/68] Round down local-volume PV capacity to an easily readable value. Added common allocation units to top level util lib alongside RoundUpSize. Added roundDownCapacityPretty and unit tests as part of local-volume discovery. --- lib/util/util.go | 8 ++++ .../provisioner/pkg/discovery/discovery.go | 20 +++++++++- .../pkg/discovery/discovery_test.go | 39 ++++++++++++++++--- 3 files changed, 61 insertions(+), 6 deletions(-) diff --git a/lib/util/util.go b/lib/util/util.go index 504ee1b5624..a1c29767ee4 100644 --- a/lib/util/util.go +++ b/lib/util/util.go @@ -18,6 +18,14 @@ package util import "k8s.io/api/core/v1" +// Common allocation units +const ( + KiB int64 = 1024 + MiB int64 = 1024 * KiB + GiB int64 = 1024 * MiB + TiB int64 = 1024 * GiB +) + // RoundUpSize calculates how many allocation units are needed to accommodate // a volume of given size. E.g. 
when user wants 1500MiB volume, while AWS EBS // allocates volumes in gibibyte-sized chunks, diff --git a/local-volume/provisioner/pkg/discovery/discovery.go b/local-volume/provisioner/pkg/discovery/discovery.go index fb7ca3e12a2..2cc1a71fada 100644 --- a/local-volume/provisioner/pkg/discovery/discovery.go +++ b/local-volume/provisioner/pkg/discovery/discovery.go @@ -24,6 +24,7 @@ import ( "github.com/golang/glog" "github.com/kubernetes-incubator/external-storage/local-volume/provisioner/pkg/common" + esUtil "github.com/kubernetes-incubator/external-storage/lib/util" "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/api/v1/helper" ) @@ -177,7 +178,7 @@ func (d *Discoverer) createPV(file, class string, config common.MountConfig, cap pvSpec := common.CreateLocalPVSpec(&common.LocalPVConfig{ Name: pvName, HostPath: outsidePath, - Capacity: capacityByte, + Capacity: roundDownCapacityPretty(capacityByte), StorageClass: class, ProvisionerName: d.Name, AffinityAnn: d.nodeAffinityAnn, @@ -191,3 +192,20 @@ func (d *Discoverer) createPV(file, class string, config common.MountConfig, cap } glog.Infof("Created PV %q for volume at %q", pvName, outsidePath) } + +// Round down the capacity to an easy to read value. +func roundDownCapacityPretty(capacityBytes int64) int64 { + + easyToReadUnitsBytes := []int64{esUtil.GiB, esUtil.MiB} + + // Round down to the nearest easy to read unit + // such that there are at least 10 units at that size. + for _, easyToReadUnitBytes := range easyToReadUnitsBytes { + // Round down the capacity to the nearest unit. + size := capacityBytes / easyToReadUnitBytes + if size >= 10 { + return size * easyToReadUnitBytes + } + } + return capacityBytes +} diff --git a/local-volume/provisioner/pkg/discovery/discovery_test.go b/local-volume/provisioner/pkg/discovery/discovery_test.go index 7895d395888..c4f80ea8f00 100644 --- a/local-volume/provisioner/pkg/discovery/discovery_test.go +++ b/local-volume/provisioner/pkg/discovery/discovery_test.go @@ -18,12 +18,13 @@ package discovery import ( "fmt" - "path/filepath" - "testing" - + esUtil "github.com/kubernetes-incubator/external-storage/lib/util" "github.com/kubernetes-incubator/external-storage/local-volume/provisioner/pkg/cache" "github.com/kubernetes-incubator/external-storage/local-volume/provisioner/pkg/common" "github.com/kubernetes-incubator/external-storage/local-volume/provisioner/pkg/util" + "path/filepath" + "testing" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api/v1/helper" @@ -240,7 +241,7 @@ func TestDiscoverVolumes_BadVolume(t *testing.T) { func testSetup(t *testing.T, test *testConfig) *Discoverer { test.cache = cache.NewVolumeCache() - test.volUtil = util.NewFakeVolumeUtil(false) + test.volUtil = util.NewFakeVolumeUtil(false /*deleteShouldFail*/) test.volUtil.AddNewDirEntries(testMountDir, test.dirLayout) test.apiUtil = util.NewFakeAPIUtil(test.apiShouldFail, test.cache) @@ -352,7 +353,7 @@ func verifyCapacity(t *testing.T, createdPV *v1.PersistentVolume, expectedPV *te if !ok { t.Errorf("Unable to convert resource storage into int64") } - if capacityInt != expectedPV.capacity { + if roundDownCapacityPretty(capacityInt) != expectedPV.capacity { t.Errorf("Expected capacity %d, got %d", expectedPV.capacity, capacityInt) } } @@ -422,3 +423,31 @@ func verifyPVsNotInCache(t *testing.T, test *testConfig) { } } } + +func TestRoundDownCapacityPretty(t *testing.T) { + var capTests = []struct { + n int64 // input + expected int64 // expected result + }{ + {100 * esUtil.KiB, 100 * 
esUtil.KiB}, + {10 * esUtil.MiB, 10 * esUtil.MiB}, + {100 * esUtil.MiB, 100 * esUtil.MiB}, + {10 * esUtil.GiB, 10 * esUtil.GiB}, + {10 * esUtil.TiB, 10 * esUtil.TiB}, + {9*esUtil.GiB + 999*esUtil.MiB, 9*esUtil.GiB + 999*esUtil.MiB}, + {10*esUtil.GiB + 5, 10 * esUtil.GiB}, + {10*esUtil.MiB + 5, 10 * esUtil.MiB}, + {10000*esUtil.MiB - 1, 9999 * esUtil.MiB}, + {13*esUtil.GiB - 1, 12 * esUtil.GiB}, + {63*esUtil.MiB - 10, 62 * esUtil.MiB}, + {12345, 12345}, + {10000*esUtil.GiB - 1, 9999 * esUtil.GiB}, + {3*esUtil.TiB + 2*esUtil.GiB + 1*esUtil.MiB, 3*esUtil.TiB + 2*esUtil.GiB}, + } + for _, tt := range capTests { + actual := roundDownCapacityPretty(tt.n) + if actual != tt.expected { + t.Errorf("roundDownCapacityPretty(%d): expected %d, actual %d", tt.n, tt.expected, actual) + } + } +} From 4fa4e5c23da2af3496df6eb4cc7b4eac87eea0aa Mon Sep 17 00:00:00 2001 From: Yecheng Fu Date: Wed, 25 Oct 2017 15:34:04 +0800 Subject: [PATCH 38/68] ceph/rbd: rbdStatus only check output of successful `rbd status` run --- ceph/rbd/pkg/provision/rbd_util.go | 34 +++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/ceph/rbd/pkg/provision/rbd_util.go b/ceph/rbd/pkg/provision/rbd_util.go index abadb7ee54e..88b5a4b21d0 100644 --- a/ceph/rbd/pkg/provision/rbd_util.go +++ b/ceph/rbd/pkg/provision/rbd_util.go @@ -99,26 +99,40 @@ func (u *RBDUtil) rbdStatus(image string, pOpts *rbdProvisionOptions) (bool, err for i := start; i < start+l; i++ { mon := pOpts.monitors[i%l] // cmd "rbd status" list the rbd client watch with the following output: + // + // # there is a watcher (exit=0) // Watchers: // watcher=10.16.153.105:0/710245699 client.14163 cookie=1 + // + // # there is no watcher (exit=0) + // Watchers: none + // + // Otherwise, exit is non-zero, for example: + // + // # image does not exist (exit=2) + // rbd: error opening image kubernetes-dynamic-pvc-: (2) No such file or directory + // glog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", image, mon, pOpts.pool, pOpts.adminID, pOpts.adminSecret) args := []string{"status", image, "--pool", pOpts.pool, "-m", mon, "--id", pOpts.adminID, "--key=" + pOpts.adminSecret} cmd, err = u.execCommand("rbd", args) output = string(cmd) - if err != nil { - // ignore error code, just checkout output for watcher string - // TODO: Why should we ignore error code here? Igorning error code here cause we only try first monitor. - glog.Warningf("failed to execute rbd status on mon %s", mon) + // break if command succeeds + if err == nil { + break } + } - if strings.Contains(output, imageWatcherStr) { - glog.V(4).Infof("rbd: watchers on %s: %s", image, output) - return true, nil - } - glog.Warningf("rbd: no watchers on %s", image) - return false, nil + // If command never succeed, returns its last error. 
+ if err != nil { + return false, err + } + + if strings.Contains(output, imageWatcherStr) { + glog.V(4).Infof("rbd: watchers on %s: %s", image, output) + return true, nil } + glog.Warningf("rbd: no watchers on %s", image) return false, nil } From deeb2ca06cd00fa119965c60e0b809eed50f29be Mon Sep 17 00:00:00 2001 From: raffaelespazzoli Date: Sun, 10 Sep 2017 14:10:39 -0400 Subject: [PATCH 39/68] first draft of support for chap --- iscsi/targetd/README.md | 27 ++++- .../ansible/iscsi-provisioner-class.yaml.j2 | 7 ++ .../ansible/iscsi-provisioner-dc.yaml.j2 | 14 ++- .../targetd/ansible/provisioner-playbook.yaml | 25 ++++- .../session-chap-credential.properties.j2 | 4 + iscsi/targetd/ansible/targetd-playbook.yaml | 20 ++++ iscsi/targetd/cmd/start.go | 13 +++ .../kubernetes/iscsi-provisioner-class.yaml | 6 + .../openshift/iscsi-provisioner-class.yaml | 7 +- .../targetd/provisioner/iscsi-provisioner.go | 106 ++++++++++++++++-- 10 files changed, 214 insertions(+), 15 deletions(-) create mode 100644 iscsi/targetd/ansible/session-chap-credential.properties.j2 diff --git a/iscsi/targetd/README.md b/iscsi/targetd/README.md index 0ffcaf41f5b..69a6df28eb3 100644 --- a/iscsi/targetd/README.md +++ b/iscsi/targetd/README.md @@ -273,6 +273,13 @@ parameters: # this is a comma separated list of initiators that will be give access to the created volumes, they must correspond to what you have configured in your nodes. initiators: iqn.2017-04.com.example:node1 + +# whether or not to use chap authentication for discovery operations + chapAuthDiscovery: "true" + +# whether or not to use chap authentication for session operations + chapAuthSession: "true" + ``` you can create one with the following command in kubernetes @@ -314,11 +321,21 @@ before running the playbooks you need to annotate the inventory file with some a | targetd_lvm_volume_group | the volume group to be created | | targetd_lvm_physical_volume| comma separated list of devices to add to the volume group | | targetd_password | the password used to authenticate the connection to targetd, you may want to not store this on your inventory file, you can pass this as `{{ lookup('env','TARGETD_PASSWORD') }}` | -| targetd_user | the username used to authenticate the connection to targetd, you may want to not store this on your inventory file, you can pass this as `{{ lookup('env','TARGETD_USERNAME') }}` | +| targetd_user | the username used to authenticate the connection to targetd, you may want to not store this on your inventory file, you can pass this as `{{ lookup('env','TARGETD_USERNAME') }}` | | targetd_iscsi_target | the name of the target to be created in the target server | | iscsi_provisioner_pullspec | the location of the iSCSI-targetd provisioner image | | iscsi_provisioner_default_storage_class | whether the created storage class should be the default class | | iscsi_provisioner_portals | optional, comma separated list of alternative IP:port where the iscsi server can be found, specifying this parameters trigger the usage of multipath | +| chap_auth_discovery | true/false whether to use chap authentication for discovery operations | +| discovery_sendtargets_auth_username | initiator username | +| discovery_sendtargets_auth_password | initiator password, you can pass this as `{{ lookup('env','SENDTARGET_PASSWORD') }}` | +| discovery_sendtargets_auth_username_in | target username | +| discovery_sendtargets_auth_password_in | target password, you can pass this as `{{ lookup('env','SENDTARGET_PASSWORD_IN') }}` | +| chap_auth_session | true/false 
whether to use chap authentication for session operations | +| discovery_session_auth_username | initiator username | +| discovery_session_auth_password | initiator password, you can pass this as `{{ lookup('env','SESSION_PASSWORD') }}` | +| discovery_session_auth_username_in | target username | +| discovery_session_auth_password_in | target password, you can pass this as `{{ lookup('env','SESSION_PASSWORD_IN') }}` | All the nodes should have a label with their defining the initiator name for that node, here is an example: @@ -334,3 +351,11 @@ ansible-playbook -i ansible/targetd-playbook.yaml ansible-playbook -i ansible/initiator-playbook.yaml ansible-playbook -i ansible/provisioner-playbook.yaml ``` + + +## on iSCSI authentication + +If you enable iSCSI CHAP-based authentication, the ansible installer will set the target configuration consinstently and also the storage class. +However at provisioning time the provisioner will not setup the set secret. Having the permissions to setup secret in any manepsace would make the provisioner too powerful and insecure. +So it is up to the project administrator to setup the secret. +The name of the expected secret will be `-chap-secret` diff --git a/iscsi/targetd/ansible/iscsi-provisioner-class.yaml.j2 b/iscsi/targetd/ansible/iscsi-provisioner-class.yaml.j2 index d1fc90c005e..86d7882a7ee 100644 --- a/iscsi/targetd/ansible/iscsi-provisioner-class.yaml.j2 +++ b/iscsi/targetd/ansible/iscsi-provisioner-class.yaml.j2 @@ -27,5 +27,12 @@ parameters: # this is a comma separated list of initiators that will be give access to the created volumes, they must correspond to what you have configured in your nodes. initiators: {% for host in groups['nodes'] %}{{ hostvars[host]['iscsi_initiator_name'] }},{% endfor %} + +# whether or not to use chap authentication for discovery operations + chapAuthDiscovery: {% if chap_auth_discovery | bool %} "true" {% endif %} + +# whether or not to use chap authentication for session operations + chapAuthSession: {% if chap_auth_session | bool %} "true" {% endif %} + diff --git a/iscsi/targetd/ansible/iscsi-provisioner-dc.yaml.j2 b/iscsi/targetd/ansible/iscsi-provisioner-dc.yaml.j2 index fc1673a35b8..628bec02513 100644 --- a/iscsi/targetd/ansible/iscsi-provisioner-dc.yaml.j2 +++ b/iscsi/targetd/ansible/iscsi-provisioner-dc.yaml.j2 @@ -32,4 +32,16 @@ spec: key: password - name: TARGETD_ADDRESS value: {{ hostvars[groups['targetd'][0]]['ansible_default_ipv4']['address'] }} - serviceAccount: iscsi-provisioner +{% if chap_auth_session | bool %} + volumeMounts: + - name: chap-auth-volume + mountPath: /var/run/secrets/iscsi-provisioner +{% endif %} + serviceAccount: iscsi-provisioner +{% if chap_auth_session | bool %} + volumes: + - name: chap-auth-volume + secret: + secretName: chap-auth-secret +{% endif %} + diff --git a/iscsi/targetd/ansible/provisioner-playbook.yaml b/iscsi/targetd/ansible/provisioner-playbook.yaml index 5b2781fa1b3..80bbfca6a39 100644 --- a/iscsi/targetd/ansible/provisioner-playbook.yaml +++ b/iscsi/targetd/ansible/provisioner-playbook.yaml @@ -109,7 +109,30 @@ create -f /tmp/iscsi-provisioner-class.yaml -n iscsi-provisioner when: class_test.rc != 0 - + + - name: create chap credential file, if necessary + template: + src: ./session-chap-credential.properties.j2 + dest: /tmp/session-chap-credential.properties + when: chap_auth_session == true + + - name: test if chap secret exists, if necessary + command: > + /usr/bin/oc --config=/etc/origin/master/admin.kubeconfig + get secret chap-auth-secret -n iscsi-provisioner 
+ when: chap_auth_session == true + failed_when: false + changed_when: false + register: class_test + + - name: create chap secret, if necessary + shell: > + /usr/bin/oc --config=/etc/origin/master/admin.kubeconfig + create secret generic chap-auth-secret --from-file=/tmp/session-chap-credential.properties + -n iscsi-provisioner + when: (chap_auth_session == true) and (class_test.rc != 0) + + - name: create iscsi provisioner deployment config template template: src: ./iscsi-provisioner-dc.yaml.j2 diff --git a/iscsi/targetd/ansible/session-chap-credential.properties.j2 b/iscsi/targetd/ansible/session-chap-credential.properties.j2 new file mode 100644 index 00000000000..0b5e8f4fb7a --- /dev/null +++ b/iscsi/targetd/ansible/session-chap-credential.properties.j2 @@ -0,0 +1,4 @@ +node.session.auth.username={{ discovery_session_auth_username }} +node.session.auth.password={{ discovery_session_auth_password }} +node.session.auth.username_in={{ discovery_session_auth_username_in }} +node.session.auth.password_in={{ discovery_session_auth_password_in }} \ No newline at end of file diff --git a/iscsi/targetd/ansible/targetd-playbook.yaml b/iscsi/targetd/ansible/targetd-playbook.yaml index cbb48a208ff..a68b5dab9d7 100644 --- a/iscsi/targetd/ansible/targetd-playbook.yaml +++ b/iscsi/targetd/ansible/targetd-playbook.yaml @@ -51,3 +51,23 @@ name: targetd enabled: yes state: started + + - name: disable discovery auth, if necessary + command: targetcli iscsi/ set discovery_auth enable=0 + when: chap_auth_discovery == false + + - name: enable discovery auth, if necessary + command: targetcli iscsi/ set discovery_auth enable=1 + when: chap_auth_discovery == true + - name: setup initiator username for discovery, if necessary + command: targetcli iscsi/ set discovery_auth userid={{ discovery_sendtargets_auth_username }} + when: chap_auth_discovery == true + - name: setup initiator password for discovery, if necessary + command: targetcli iscsi/ set discovery_auth password={{ discovery_sendtargets_auth_password }} + when: chap_auth_discovery == true + - name: setup target username for discovery, if necessary + command: targetcli iscsi/ set discovery_auth mutual_userid={{ discovery_sendtargets_auth_username_in }} + when: chap_auth_discovery == true + - name: setup target password for discovery, if necessary + command: targetcli iscsi/ set discovery_auth mutual_password={{ discovery_sendtargets_auth_password_in }} + when: chap_auth_discovery == true \ No newline at end of file diff --git a/iscsi/targetd/cmd/start.go b/iscsi/targetd/cmd/start.go index a90f29f964a..bc1e19007dd 100644 --- a/iscsi/targetd/cmd/start.go +++ b/iscsi/targetd/cmd/start.go @@ -128,6 +128,19 @@ func init() { viper.BindPFlag("master", startcontrollerCmd.Flags().Lookup("master")) startcontrollerCmd.Flags().String("kubeconfig", "", "Absolute path to the kubeconfig") viper.BindPFlag("kubeconfig", startcontrollerCmd.Flags().Lookup("kubeconfig")) + startcontrollerCmd.Flags().String("session-chap-credential-file-path", "/var/run/secrets/iscsi-provisioner/session-chap-credential.properties", "path where the credential for session chap authentication can be found") + viper.BindPFlag("session-chap-credential-file-path", startcontrollerCmd.Flags().Lookup("session-chap-credential-file-path")) + + // Here you will define your flags and configuration settings. 
+ + // Cobra supports Persistent Flags which will work for this command + // and all subcommands, e.g.: + // start-controllerCmd.PersistentFlags().String("foo", "", "A help for foo") + + // Cobra supports local flags which will only run when this command + // is called directly, e.g.: + // start-controllerCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") + } func initLog() { diff --git a/iscsi/targetd/kubernetes/iscsi-provisioner-class.yaml b/iscsi/targetd/kubernetes/iscsi-provisioner-class.yaml index f7d2bbf053e..479e99be8ff 100644 --- a/iscsi/targetd/kubernetes/iscsi-provisioner-class.yaml +++ b/iscsi/targetd/kubernetes/iscsi-provisioner-class.yaml @@ -21,6 +21,12 @@ parameters: # this is a comma separated list of initiators that will be give access to the created volumes, they must correspond to what you have configured in your nodes. initiators: iqn.2017-04.com.example:node1 + +# whether or not to use chap authentication for discovery operations + chapAuthDiscovery: "true" + +# whether or not to use chap authentication for session operations + chapAuthSession: "true" # This is the filesystem you want your volume to be formatted with, default xfs # fsType: xfs diff --git a/iscsi/targetd/openshift/iscsi-provisioner-class.yaml b/iscsi/targetd/openshift/iscsi-provisioner-class.yaml index b65d674323d..98e12905991 100644 --- a/iscsi/targetd/openshift/iscsi-provisioner-class.yaml +++ b/iscsi/targetd/openshift/iscsi-provisioner-class.yaml @@ -21,10 +21,15 @@ parameters: # this is a comma separated list of initiators that will be give access to the created volumes, they must correspond to what you have configured in your nodes. initiators: iqn.2017-04.com.example:node1 - + # This is the filesystem you want your volume to be formatted with, default xfs # fsType: xfs # Whether the volume should be mounted in readonly mode, default false # readonly: false +# whether or not to use chap authentication for discovery operations + chapAuthDiscovery: "true" + +# whether or not to use chap authentication for session operations + chapAuthSession: "true" \ No newline at end of file diff --git a/iscsi/targetd/provisioner/iscsi-provisioner.go b/iscsi/targetd/provisioner/iscsi-provisioner.go index 064c340f879..76768243e8e 100644 --- a/iscsi/targetd/provisioner/iscsi-provisioner.go +++ b/iscsi/targetd/provisioner/iscsi-provisioner.go @@ -19,27 +19,43 @@ package provisioner import ( "errors" "fmt" - "sort" - "strconv" - "strings" - "github.com/Sirupsen/logrus" "github.com/kubernetes-incubator/external-storage/lib/controller" "github.com/kubernetes-incubator/external-storage/lib/util" + "github.com/magiconair/properties" "github.com/powerman/rpc-codec/jsonrpc2" "github.com/spf13/viper" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sort" + "strconv" + "strings" ) var log = logrus.New() +type chapSessionCredentials struct { + InUser string `properties:"node.session.auth.username"` + InPassword string `properties:"node.session.auth.password"` + OutUser string `properties:"node.session.auth.username_in"` + OutPassword string `properties:"node.session.auth.password_in"` +} + type volCreateArgs struct { Pool string `json:"pool"` Name string `json:"name"` Size int64 `json:"size"` } +//initiator_set_auth(initiator_wwn, in_user, in_pass, out_user, out_pass) +type initiatorSetAuthArgs struct { + InitiatorWwn string `json:"initiator_wwn"` + InUser string `json:"in_user"` + InPassword string `json:"in_pass"` + OutUser string `json:"out_user"` + OutPassword string `json:"out_pass"` +} 
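The chapSessionCredentials struct above maps the keys of the mounted session-chap-credential.properties file onto Go fields through struct tags. A self-contained sketch of that decode step (a standalone program for illustration, not the provisioner's own code; the path is the default of the new --session-chap-credential-file-path flag):

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

// Mirrors the chapSessionCredentials struct added by this patch; the tags are
// the keys expected in session-chap-credential.properties.
type chapSessionCredentials struct {
	InUser      string `properties:"node.session.auth.username"`
	InPassword  string `properties:"node.session.auth.password"`
	OutUser     string `properties:"node.session.auth.username_in"`
	OutPassword string `properties:"node.session.auth.password_in"`
}

func main() {
	// Default path used by the provisioner's --session-chap-credential-file-path flag.
	prop, err := properties.LoadFile("/var/run/secrets/iscsi-provisioner/session-chap-credential.properties", properties.UTF8)
	if err != nil {
		panic(err)
	}
	creds := &chapSessionCredentials{}
	if err := prop.Decode(creds); err != nil {
		panic(err)
	}
	// Only the usernames are printed; passwords stay out of the output, as in
	// the provisioner's own Debugln calls.
	fmt.Println("session CHAP users:", creds.InUser, creds.OutUser)
}
```

The key names are the same node.session.auth.* entries that appear in the kubernetes.io/iscsi-chap secret example added later in this series.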
+ type volDestroyArgs struct { Pool string `json:"pool"` Name string `json:"name"` @@ -123,13 +139,16 @@ func (p *iscsiProvisioner) Provision(options controller.VolumeOptions) (*v1.Pers }, PersistentVolumeSource: v1.PersistentVolumeSource{ ISCSI: &v1.ISCSIVolumeSource{ - TargetPortal: options.Parameters["targetPortal"], - Portals: strings.Split(options.Parameters["portals"], "'"), - IQN: options.Parameters["iqn"], - ISCSIInterface: options.Parameters["iscsiInterface"], - Lun: lun, - ReadOnly: getReadOnly(options.Parameters["readonly"]), - FSType: getFsType(options.Parameters["fsType"]), + TargetPortal: options.Parameters["targetPortal"], + Portals: strings.Split(options.Parameters["portals"], "'"), + IQN: options.Parameters["iqn"], + ISCSIInterface: options.Parameters["iscsiInterface"], + Lun: lun, + ReadOnly: getReadOnly(options.Parameters["readonly"]), + FSType: getFsType(options.Parameters["fsType"]), + DiscoveryCHAPAuth: getBool(options.Parameters["chapAuthDiscovery"]), + SessionCHAPAuth: getBool(options.Parameters["chapAuthSession"]), + SecretRef: getSecretRef(getBool(options.Parameters["chapAuthDiscovery"]), getBool(options.Parameters["chapAuthSession"]), &v1.LocalObjectReference{Name: viper.GetString("provisioner-name") + "-chap-secret"}), }, }, }, @@ -152,6 +171,22 @@ func getFsType(fsType string) string { return fsType } +func getSecretRef(discovery bool, session bool, ref *v1.LocalObjectReference) *v1.LocalObjectReference { + if discovery || session { + return ref + } + return nil +} + +func getBool(value string) bool { + res, err := strconv.ParseBool(value) + if err != nil { + return false + } + return res + +} + // Delete removes the storage asset that was created by Provision represented // by the given PV. func (p *iscsiProvisioner) Delete(volume *v1.PersistentVolume) error { @@ -190,6 +225,22 @@ func (p *iscsiProvisioner) createVolume(options controller.VolumeOptions) (vol s vol = p.getVolumeName(options) pool = p.getVolumeGroup(options) initiators := p.getInitiators(options) + chap_credentials := &chapSessionCredentials{} + + //read chap session authentication credentials + if getBool(options.Parameters["chapAuthSession"]) { + prop, err := properties.LoadFile(viper.GetString("session-chap-credential-file-path"), properties.UTF8) + if err != nil { + log.Warnln(err) + return "", 0, "", err + } + err = prop.Decode(chap_credentials) + if err != nil { + log.Warnln(err) + return "", 0, "", err + } + } + log.Debugln("calling export_list") exportList1, err := p.exportList() if err != nil { @@ -217,6 +268,15 @@ func (p *iscsiProvisioner) createVolume(options controller.VolumeOptions) (vol s return "", 0, "", err } log.Debugln("exported volume name, lun, pool, initiator ", vol, lun, pool, initiator) + if getBool(options.Parameters["chapAuthSession"]) { + log.Debugln("setting up chap session auth for initiator, initiator, in_user, out_user: ", initiator, chap_credentials.InUser, chap_credentials.OutUser) + err = p.setInitiatorAuth(initiator, chap_credentials.InUser, chap_credentials.InPassword, chap_credentials.OutUser, chap_credentials.OutPassword) + if err != nil { + log.Warnln(err) + return "", 0, "", err + } + log.Debugln("set up chap session auth for initiator, initiator, in_user, out_user: ", initiator, chap_credentials.InUser, chap_credentials.OutUser) + } } return vol, lun, pool, nil } @@ -361,6 +421,30 @@ func (p *iscsiProvisioner) exportList() (exportList, error) { return result1, err } +//initiator_set_auth(initiator_wwn, in_user, in_pass, out_user, out_pass) + +func (p 
*iscsiProvisioner) setInitiatorAuth(initiator string, in_user string, in_password string, out_user string, out_password string) error { + + client, err := p.getConnection() + defer client.Close() + if err != nil { + log.Warnln(err) + return err + } + + //make arguments object + args := initiatorSetAuthArgs{ + InitiatorWwn: initiator, + InUser: in_user, + InPassword: in_password, + OutUser: out_user, + OutPassword: out_password, + } + //call remote procedure with args + err = client.Call("initiator_set_auth", args, null) + return err +} + func (slice exportList) Len() int { return len(slice) } From 0db4c3670c09fdc94538e73a8bb27258c21ea46b Mon Sep 17 00:00:00 2001 From: raffaelespazzoli Date: Mon, 11 Sep 2017 12:22:36 -0400 Subject: [PATCH 40/68] added example of a iscsi chap secret --- iscsi/targetd/openshift/iscsi-chap-secret.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 iscsi/targetd/openshift/iscsi-chap-secret.yaml diff --git a/iscsi/targetd/openshift/iscsi-chap-secret.yaml b/iscsi/targetd/openshift/iscsi-chap-secret.yaml new file mode 100644 index 00000000000..e8e13ca34a7 --- /dev/null +++ b/iscsi/targetd/openshift/iscsi-chap-secret.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Secret +metadata: + name: iscsi-chap-secret +type: "kubernetes.io/iscsi-chap" +data: + discovery.sendtargets.auth.username: aW5pdGlhdG9yX2Rpc2NvdmVyeV91c2Vy + discovery.sendtargets.auth.password: aW5pdGlhdG9yX2Rpc2NvdmVyeV9wYXNzd29yZA== + discovery.sendtargets.auth.username_in: dGFyZ2V0X2Rpc2NvdmVyeV91c2Vy + discovery.sendtargets.auth.password_in: dGFyZ2V0X2Rpc2NvdmVyeV9wYXNzd29yZA== + node.session.auth.username: aW5pdGlhdG9yX3Nlc3Npb25fdXNlcg== + node.session.auth.password: aW5pdGlhdG9yX3Nlc3Npb25fcGFzc3dvcmQ= + node.session.auth.username_in: dGFyZ2V0X3Nlc3Npb25fdXNlcg== + node.session.auth.password_in: dGFyZ2V0X3Nlc3Npb25fcGFzc3dvcmQ= \ No newline at end of file From ddab53fb729854075afeb90c775df532b1f99cd9 Mon Sep 17 00:00:00 2001 From: raffaelespazzoli Date: Wed, 20 Sep 2017 21:02:20 -0400 Subject: [PATCH 41/68] complied with goLint rules --- .../targetd/provisioner/iscsi-provisioner.go | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/iscsi/targetd/provisioner/iscsi-provisioner.go b/iscsi/targetd/provisioner/iscsi-provisioner.go index 76768243e8e..5e444845615 100644 --- a/iscsi/targetd/provisioner/iscsi-provisioner.go +++ b/iscsi/targetd/provisioner/iscsi-provisioner.go @@ -225,7 +225,7 @@ func (p *iscsiProvisioner) createVolume(options controller.VolumeOptions) (vol s vol = p.getVolumeName(options) pool = p.getVolumeGroup(options) initiators := p.getInitiators(options) - chap_credentials := &chapSessionCredentials{} + chapCredentials := &chapSessionCredentials{} //read chap session authentication credentials if getBool(options.Parameters["chapAuthSession"]) { @@ -234,7 +234,7 @@ func (p *iscsiProvisioner) createVolume(options controller.VolumeOptions) (vol s log.Warnln(err) return "", 0, "", err } - err = prop.Decode(chap_credentials) + err = prop.Decode(chapCredentials) if err != nil { log.Warnln(err) return "", 0, "", err @@ -269,13 +269,13 @@ func (p *iscsiProvisioner) createVolume(options controller.VolumeOptions) (vol s } log.Debugln("exported volume name, lun, pool, initiator ", vol, lun, pool, initiator) if getBool(options.Parameters["chapAuthSession"]) { - log.Debugln("setting up chap session auth for initiator, initiator, in_user, out_user: ", initiator, chap_credentials.InUser, chap_credentials.OutUser) - err = 
p.setInitiatorAuth(initiator, chap_credentials.InUser, chap_credentials.InPassword, chap_credentials.OutUser, chap_credentials.OutPassword) + log.Debugln("setting up chap session auth for initiator, initiator, in_user, out_user: ", initiator, chapCredentials.InUser, chapCredentials.OutUser) + err = p.setInitiatorAuth(initiator, chapCredentials.InUser, chapCredentials.InPassword, chapCredentials.OutUser, chapCredentials.OutPassword) if err != nil { log.Warnln(err) return "", 0, "", err } - log.Debugln("set up chap session auth for initiator, initiator, in_user, out_user: ", initiator, chap_credentials.InUser, chap_credentials.OutUser) + log.Debugln("set up chap session auth for initiator, initiator, in_user, out_user: ", initiator, chapCredentials.InUser, chapCredentials.OutUser) } } return vol, lun, pool, nil @@ -423,7 +423,7 @@ func (p *iscsiProvisioner) exportList() (exportList, error) { //initiator_set_auth(initiator_wwn, in_user, in_pass, out_user, out_pass) -func (p *iscsiProvisioner) setInitiatorAuth(initiator string, in_user string, in_password string, out_user string, out_password string) error { +func (p *iscsiProvisioner) setInitiatorAuth(initiator string, inUser string, inPassword string, outUser string, outPassword string) error { client, err := p.getConnection() defer client.Close() @@ -435,10 +435,10 @@ func (p *iscsiProvisioner) setInitiatorAuth(initiator string, in_user string, in //make arguments object args := initiatorSetAuthArgs{ InitiatorWwn: initiator, - InUser: in_user, - InPassword: in_password, - OutUser: out_user, - OutPassword: out_password, + InUser: inUser, + InPassword: inPassword, + OutUser: outUser, + OutPassword: outPassword, } //call remote procedure with args err = client.Call("initiator_set_auth", args, null) From a278620e2fb03514d9898d03480cff65fcd373e8 Mon Sep 17 00:00:00 2001 From: raffaelespazzoli Date: Sun, 10 Sep 2017 14:10:39 -0400 Subject: [PATCH 42/68] first draft of support for chap --- iscsi/targetd/README.md | 1 + iscsi/targetd/provisioner/iscsi-provisioner.go | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/iscsi/targetd/README.md b/iscsi/targetd/README.md index 69a6df28eb3..890de139d22 100644 --- a/iscsi/targetd/README.md +++ b/iscsi/targetd/README.md @@ -359,3 +359,4 @@ If you enable iSCSI CHAP-based authentication, the ansible installer will set th However at provisioning time the provisioner will not setup the set secret. Having the permissions to setup secret in any manepsace would make the provisioner too powerful and insecure. So it is up to the project administrator to setup the secret. 
The name of the expected secret will be `-chap-secret` + diff --git a/iscsi/targetd/provisioner/iscsi-provisioner.go b/iscsi/targetd/provisioner/iscsi-provisioner.go index 5e444845615..4912d632e2e 100644 --- a/iscsi/targetd/provisioner/iscsi-provisioner.go +++ b/iscsi/targetd/provisioner/iscsi-provisioner.go @@ -226,7 +226,6 @@ func (p *iscsiProvisioner) createVolume(options controller.VolumeOptions) (vol s pool = p.getVolumeGroup(options) initiators := p.getInitiators(options) chapCredentials := &chapSessionCredentials{} - //read chap session authentication credentials if getBool(options.Parameters["chapAuthSession"]) { prop, err := properties.LoadFile(viper.GetString("session-chap-credential-file-path"), properties.UTF8) From 9efa63914e5508c41f55032582ea90cf1c7fcda7 Mon Sep 17 00:00:00 2001 From: raffaelespazzoli Date: Fri, 27 Oct 2017 12:23:09 -0400 Subject: [PATCH 43/68] no change --- iscsi/targetd/ansible/multipath.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/iscsi/targetd/ansible/multipath.conf b/iscsi/targetd/ansible/multipath.conf index 5cff8676eb6..de126be1d70 100644 --- a/iscsi/targetd/ansible/multipath.conf +++ b/iscsi/targetd/ansible/multipath.conf @@ -1,2 +1,2 @@ blacklist { -} +} \ No newline at end of file From 401d5733ba85868b15c838de318ef9cb7d939993 Mon Sep 17 00:00:00 2001 From: raffaelespazzoli Date: Fri, 27 Oct 2017 21:34:23 -0400 Subject: [PATCH 44/68] fixed compile --- iscsi/targetd/provisioner/iscsi-provisioner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/iscsi/targetd/provisioner/iscsi-provisioner.go b/iscsi/targetd/provisioner/iscsi-provisioner.go index 4912d632e2e..0fb7489ef0f 100644 --- a/iscsi/targetd/provisioner/iscsi-provisioner.go +++ b/iscsi/targetd/provisioner/iscsi-provisioner.go @@ -440,7 +440,7 @@ func (p *iscsiProvisioner) setInitiatorAuth(initiator string, inUser string, inP OutPassword: outPassword, } //call remote procedure with args - err = client.Call("initiator_set_auth", args, null) + err = client.Call("initiator_set_auth", args, nil) return err } From 97c801a069ae93504ab3b1e4a1ca51a45a2ea5eb Mon Sep 17 00:00:00 2001 From: Xing Zhou Date: Tue, 10 Oct 2017 13:13:50 +0800 Subject: [PATCH 45/68] Updated all k8s.io/* dependencies to tag v1.8.2 --- glide.lock | 223 +- glide.yaml | 15 +- gluster/glusterfs/pkg/volume/exec.go | 10 +- lib/controller/controller.go | 4 +- lib/controller/controller_test.go | 2 +- .../providers/openstack/metadata.go | 2 +- .../pkg/controller/snapshotter/snapshotter.go | 2 +- vendor/github.com/Sirupsen/logrus/.travis.yml | 6 +- .../github.com/Sirupsen/logrus/CHANGELOG.md | 7 - vendor/github.com/Sirupsen/logrus/README.md | 129 +- vendor/github.com/Sirupsen/logrus/entry.go | 4 - vendor/github.com/Sirupsen/logrus/exported.go | 6 - .../github.com/Sirupsen/logrus/formatter.go | 4 - .../Sirupsen/logrus/json_formatter.go | 22 +- .../Sirupsen/logrus/json_formatter_test.go | 120 - vendor/github.com/Sirupsen/logrus/logger.go | 84 +- .../github.com/Sirupsen/logrus/logrus_test.go | 24 +- .../Sirupsen/logrus/terminal_notwindows.go | 2 +- .../Sirupsen/logrus/terminal_openbsd.go | 7 - .../Sirupsen/logrus/text_formatter.go | 51 +- .../Sirupsen/logrus/text_formatter_test.go | 30 +- vendor/github.com/Sirupsen/logrus/writer.go | 31 - vendor/github.com/aws/aws-sdk-go/.travis.yml | 14 +- vendor/github.com/aws/aws-sdk-go/CHANGELOG.md | 189 + .../github.com/aws/aws-sdk-go/CONTRIBUTING.md | 2 +- vendor/github.com/aws/aws-sdk-go/Gopkg.lock | 20 + vendor/github.com/aws/aws-sdk-go/Gopkg.toml | 48 + 
vendor/github.com/aws/aws-sdk-go/README.md | 2 +- .../aws/aws-sdk-go/aws/awsutil/copy_test.go | 178 +- .../aws/aws-sdk-go/aws/awsutil/equal_test.go | 5 +- .../aws-sdk-go/aws/awsutil/path_value_test.go | 82 +- .../aws/aws-sdk-go/aws/convert_types_test.go | 347 +- .../aws/corehandlers/handlers_test.go | 91 +- .../aws/corehandlers/param_validator_test.go | 72 +- .../aws-sdk-go/aws/ec2metadata/api_test.go | 117 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 22 +- .../aws-sdk-go/aws/session/session_test.go | 28 +- .../aws/signer/v4/functional_1_5_test.go | 41 +- .../aws/signer/v4/header_rules_test.go | 46 +- .../aws/aws-sdk-go/aws/types_test.go | 39 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../github.com/aws/aws-sdk-go/buildspec.yml | 21 + .../private/protocol/ec2query/build_test.go | 81 +- .../protocol/ec2query/unmarshal_test.go | 161 +- .../private/protocol/jsonrpc/build_test.go | 209 +- .../protocol/jsonrpc/unmarshal_test.go | 153 +- .../private/protocol/query/build_test.go | 798 +- .../protocol/query/queryutil/queryutil.go | 4 + .../private/protocol/query/unmarshal_test.go | 281 +- .../private/protocol/restjson/build_test.go | 229 +- .../protocol/restjson/unmarshal_test.go | 229 +- .../aws/aws-sdk-go/service/ec2/api.go | 681 +- .../service/ec2/customizations_test.go | 25 +- .../aws/aws-sdk-go/service/ec2/waiters.go | 5 + .../aws/aws-sdk-go/service/ecr/api.go | 1382 +- .../aws/aws-sdk-go/service/ecr/doc.go | 10 +- .../aws/aws-sdk-go/service/ecr/errors.go | 23 +- .../service/sts/customizations_test.go | 18 +- .../github.com/docker/distribution/.mailmap | 4 +- vendor/github.com/docker/distribution/AUTHORS | 54 + .../docker/distribution/BUILDING.md | 117 + .../docker/distribution/CHANGELOG.md | 108 + .../docker/distribution/CONTRIBUTING.md | 8 + .../github.com/docker/distribution/Dockerfile | 13 +- .../docker/distribution/MAINTAINERS | 5 - .../github.com/docker/distribution/Makefile | 41 +- .../github.com/docker/distribution/README.md | 13 +- .../docker/distribution/RELEASE-CHECKLIST.md | 44 + .../github.com/docker/distribution/ROADMAP.md | 2 +- .../github.com/docker/distribution/blobs.go | 24 +- .../github.com/docker/distribution/circle.yml | 19 +- .../digest/digester_resumable_test.go | 21 - .../distribution/digest/verifiers_test.go | 49 - .../distribution/{digest => digestset}/set.go | 30 +- .../{digest => digestset}/set_test.go | 25 +- .../github.com/docker/distribution/errors.go | 4 +- .../docker/distribution/manifests.go | 40 +- .../docker/distribution/reference/helpers.go | 42 + .../distribution/reference/normalize.go | 170 + .../distribution/reference/normalize_test.go | 625 + .../distribution/reference/reference.go | 213 +- .../distribution/reference/reference_test.go | 212 +- .../docker/distribution/reference/regexp.go | 41 +- .../distribution/reference/regexp_test.go | 68 +- .../docker/distribution/registry.go | 2 +- .../docker/distribution/vendor.conf | 43 + .../{engine-api => docker/api}/types/auth.go | 0 .../api}/types/blkiodev/blkio.go | 0 .../api}/types/client.go | 193 +- .../api}/types/configs.go | 24 +- .../docker/api/types/container/config.go | 62 + .../api/types/container/container_create.go | 21 + .../api/types/container/container_update.go | 17 + .../api/types/container/container_wait.go | 17 + .../api}/types/container/host_config.go | 37 +- .../api}/types/container/hostconfig_unix.go | 2 +- .../types/container/hostconfig_windows.go | 0 .../docker/docker/api/types/error_response.go | 13 + .../api}/types/filters/parse.go | 23 +- 
.../api}/types/filters/parse_test.go | 23 + .../docker/docker/api/types/id_response.go | 13 + .../docker/docker/api/types/image_summary.go | 49 + .../docker/docker/api/types/mount/mount.go | 113 + .../api}/types/network/network.go | 11 +- .../docker/docker/api/types/plugin.go | 189 + .../docker/docker/api/types/plugin_device.go | 25 + .../docker/docker/api/types/plugin_env.go | 25 + .../docker/api/types/plugin_interface_type.go | 21 + .../docker/docker/api/types/plugin_mount.go | 37 + .../docker/api/types/plugin_responses.go | 64 + .../docker/docker/api/types/port.go | 23 + .../docker/api/types/registry/authenticate.go | 21 + .../api}/types/registry/registry.go | 5 + .../api}/types/seccomp.go | 41 +- .../api/types/service_update_response.go | 12 + .../docker/docker/api/types/stats.go | 178 + .../api}/types/strslice/strslice.go | 0 .../api}/types/strslice/strslice_test.go | 0 .../docker/docker/api/types/swarm/common.go | 27 + .../docker/api/types/swarm/container.go | 46 + .../docker/docker/api/types/swarm/network.go | 111 + .../docker/docker/api/types/swarm/node.go | 114 + .../docker/docker/api/types/swarm/secret.go | 31 + .../docker/docker/api/types/swarm/service.go | 105 + .../docker/docker/api/types/swarm/swarm.go | 197 + .../docker/docker/api/types/swarm/task.go | 128 + .../docker/docker/api/types/types.go | 549 + .../api}/types/versions/README.md | 2 +- .../api}/types/versions/compare.go | 0 .../api}/types/versions/compare_test.go | 0 .../docker/docker/api/types/volume.go | 58 + .../github.com/docker/engine-api/.travis.yml | 9 - .../github.com/docker/engine-api/CHANGELOG.md | 141 - .../github.com/docker/engine-api/MAINTAINERS | 160 - vendor/github.com/docker/engine-api/Makefile | 21 - vendor/github.com/docker/engine-api/README.md | 68 - .../github.com/docker/engine-api/appveyor.yml | 37 - .../docker/engine-api/client/client.go | 141 - .../docker/engine-api/client/client_darwin.go | 4 - .../engine-api/client/client_mock_test.go | 54 - .../docker/engine-api/client/client_test.go | 119 - .../docker/engine-api/client/client_unix.go | 6 - .../engine-api/client/client_windows.go | 4 - .../engine-api/client/container_attach.go | 34 - .../engine-api/client/container_commit.go | 53 - .../client/container_commit_test.go | 91 - .../engine-api/client/container_copy.go | 97 - .../engine-api/client/container_copy_test.go | 244 - .../engine-api/client/container_create.go | 46 - .../client/container_create_test.go | 72 - .../engine-api/client/container_diff.go | 23 - .../engine-api/client/container_diff_test.go | 55 - .../engine-api/client/container_exec.go | 49 - .../engine-api/client/container_exec_test.go | 149 - .../engine-api/client/container_export.go | 20 - .../client/container_export_test.go | 18 - .../engine-api/client/container_inspect.go | 65 - .../client/container_inspect_test.go | 119 - .../engine-api/client/container_kill.go | 17 - .../engine-api/client/container_kill_test.go | 41 - .../engine-api/client/container_list.go | 56 - .../engine-api/client/container_list_test.go | 96 - .../engine-api/client/container_logs.go | 52 - .../engine-api/client/container_logs_test.go | 40 - .../engine-api/client/container_pause.go | 10 - .../engine-api/client/container_pause_test.go | 41 - .../engine-api/client/container_remove.go | 27 - .../client/container_remove_test.go | 59 - .../engine-api/client/container_rename.go | 16 - .../client/container_rename_test.go | 46 - .../engine-api/client/container_resize.go | 29 - .../client/container_resize_test.go | 82 - 
.../engine-api/client/container_restart.go | 19 - .../client/container_restart_test.go | 46 - .../engine-api/client/container_start.go | 10 - .../engine-api/client/container_start_test.go | 45 - .../engine-api/client/container_stats.go | 24 - .../engine-api/client/container_stats_test.go | 18 - .../engine-api/client/container_stop.go | 18 - .../engine-api/client/container_stop_test.go | 41 - .../docker/engine-api/client/container_top.go | 28 - .../engine-api/client/container_top_test.go | 69 - .../engine-api/client/container_unpause.go | 10 - .../client/container_unpause_test.go | 41 - .../engine-api/client/container_update.go | 13 - .../client/container_update_test.go | 50 - .../engine-api/client/container_wait.go | 26 - .../engine-api/client/container_wait_test.go | 70 - .../docker/engine-api/client/errors.go | 94 - .../docker/engine-api/client/events.go | 48 - .../docker/engine-api/client/hijack.go | 174 - .../docker/engine-api/client/image_build.go | 135 - .../engine-api/client/image_build_test.go | 230 - .../docker/engine-api/client/image_create.go | 34 - .../engine-api/client/image_create_test.go | 76 - .../docker/engine-api/client/image_history.go | 22 - .../engine-api/client/image_history_test.go | 60 - .../docker/engine-api/client/image_import.go | 37 - .../engine-api/client/image_import_test.go | 81 - .../docker/engine-api/client/image_inspect.go | 38 - .../engine-api/client/image_inspect_test.go | 97 - .../docker/engine-api/client/image_list.go | 40 - .../engine-api/client/image_list_test.go | 122 - .../docker/engine-api/client/image_load.go | 30 - .../engine-api/client/image_load_test.go | 95 - .../docker/engine-api/client/image_pull.go | 46 - .../engine-api/client/image_pull_test.go | 199 - .../docker/engine-api/client/image_push.go | 54 - .../docker/engine-api/client/image_remove.go | 31 - .../engine-api/client/image_remove_test.go | 95 - .../docker/engine-api/client/image_save.go | 22 - .../engine-api/client/image_save_test.go | 58 - .../docker/engine-api/client/image_search.go | 49 - .../docker/engine-api/client/image_tag.go | 38 - .../engine-api/client/image_tag_test.go | 142 - .../docker/engine-api/client/info.go | 26 - .../docker/engine-api/client/info_test.go | 47 - .../docker/engine-api/client/interface.go | 79 - .../docker/engine-api/client/login.go | 28 - .../engine-api/client/network_connect.go | 18 - .../engine-api/client/network_connect_test.go | 107 - .../engine-api/client/network_create.go | 25 - .../engine-api/client/network_create_test.go | 72 - .../engine-api/client/network_disconnect.go | 14 - .../client/network_disconnect_test.go | 64 - .../engine-api/client/network_inspect.go | 24 - .../engine-api/client/network_inspect_test.go | 69 - .../docker/engine-api/client/network_list.go | 31 - .../engine-api/client/network_list_test.go | 108 - .../engine-api/client/network_remove.go | 10 - .../engine-api/client/network_remove_test.go | 47 - .../docker/engine-api/client/request.go | 185 - .../docker/engine-api/client/request_test.go | 77 - .../client/transport/cancellable/canceler.go | 23 - .../transport/cancellable/canceler_go14.go | 27 - .../transport/cancellable/cancellable.go | 113 - .../engine-api/client/transport/client.go | 47 - .../engine-api/client/transport/transport.go | 57 - .../docker/engine-api/client/version.go | 21 - .../docker/engine-api/client/volume_create.go | 20 - .../engine-api/client/volume_create_test.go | 74 - .../engine-api/client/volume_inspect.go | 24 - .../engine-api/client/volume_inspect_test.go | 76 - 
.../docker/engine-api/client/volume_list.go | 32 - .../engine-api/client/volume_list_test.go | 97 - .../docker/engine-api/client/volume_remove.go | 10 - .../engine-api/client/volume_remove_test.go | 47 - vendor/github.com/docker/engine-api/doc.go | 21 - .../engine-api/types/container/config.go | 37 - .../types/reference/image_reference.go | 34 - .../types/reference/image_reference_test.go | 72 - .../docker/engine-api/types/stats.go | 115 - .../docker/engine-api/types/time/timestamp.go | 124 - .../engine-api/types/time/timestamp_test.go | 93 - .../docker/engine-api/types/types.go | 473 - .../docker/go-connections/nat/nat.go | 173 +- .../docker/go-connections/nat/nat_test.go | 58 + .../docker/go-connections/nat/parse.go | 1 + .../go-connections/sockets/inmem_socket.go | 8 - .../docker/go-connections/sockets/sockets.go | 16 +- .../go-connections/sockets/sockets_unix.go | 20 + .../go-connections/sockets/sockets_windows.go | 14 + .../go-connections/sockets/tcp_socket.go | 2 +- .../go-connections/sockets/unix_socket.go | 56 +- .../go-connections/tlsconfig/certpool_go17.go | 18 + .../tlsconfig/certpool_other.go | 14 + .../docker/go-connections/tlsconfig/config.go | 180 +- .../go-connections/tlsconfig/config_test.go | 502 +- vendor/github.com/docker/go-units/size.go | 4 +- .../github.com/docker/go-units/size_test.go | 34 +- .../exponent-io/jsonpath/.gitignore | 24 - .../exponent-io/jsonpath/.travis.yml | 5 - .../github.com/exponent-io/jsonpath/README.md | 66 - .../exponent-io/jsonpath/decoder.go | 210 - .../exponent-io/jsonpath/decoder_test.go | 166 - .../exponent-io/jsonpath/example_test.go | 109 - .../github.com/exponent-io/jsonpath/path.go | 67 - .../exponent-io/jsonpath/path_test.go | 132 - .../exponent-io/jsonpath/pathaction.go | 61 - .../exponent-io/jsonpath/pathaction_test.go | 563 - vendor/github.com/go-ini/ini/.gitignore | 1 + vendor/github.com/go-ini/ini/.travis.yml | 13 + vendor/github.com/go-ini/ini/Makefile | 12 + vendor/github.com/go-ini/ini/README.md | 198 +- vendor/github.com/go-ini/ini/README_ZH.md | 202 +- vendor/github.com/go-ini/ini/error.go | 32 + vendor/github.com/go-ini/ini/ini.go | 1083 +- vendor/github.com/go-ini/ini/ini_test.go | 599 +- vendor/github.com/go-ini/ini/key.go | 699 + vendor/github.com/go-ini/ini/key_test.go | 573 + vendor/github.com/go-ini/ini/parser.go | 361 + vendor/github.com/go-ini/ini/parser_test.go | 42 + vendor/github.com/go-ini/ini/section.go | 248 + vendor/github.com/go-ini/ini/section_test.go | 75 + vendor/github.com/go-ini/ini/struct.go | 278 +- vendor/github.com/go-ini/ini/struct_test.go | 133 +- .../github.com/go-openapi/analysis/.drone.sec | 1 - .../github.com/go-openapi/analysis/.drone.yml | 36 - .../github.com/go-openapi/analysis/.gitignore | 2 - .../go-openapi/analysis/.pullapprove.yml | 12 - .../go-openapi/analysis/CODE_OF_CONDUCT.md | 74 - .../github.com/go-openapi/analysis/README.md | 6 - .../go-openapi/analysis/analyzer.go | 614 - .../go-openapi/analysis/analyzer_test.go | 237 - vendor/github.com/go-openapi/loads/.drone.sec | 1 - vendor/github.com/go-openapi/loads/.drone.yml | 39 - vendor/github.com/go-openapi/loads/.gitignore | 4 - .../go-openapi/loads/.pullapprove.yml | 13 - .../go-openapi/loads/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/loads/README.md | 5 - .../github.com/go-openapi/loads/json_test.go | 318 - vendor/github.com/go-openapi/loads/spec.go | 203 - .../github.com/go-openapi/loads/spec_test.go | 339 - vendor/github.com/golang/glog/README | 2 +- vendor/github.com/golang/glog/glog.go | 5 +- 
.../github.com/golang/protobuf/ptypes/any.go | 136 + .../golang/protobuf/ptypes/any/any.pb.go | 155 + .../golang/protobuf/ptypes/any/any.proto | 140 + .../golang/protobuf/ptypes/any_test.go | 113 + .../github.com/golang/protobuf/ptypes/doc.go | 35 + .../golang/protobuf/ptypes/duration.go | 102 + .../protobuf/ptypes/duration/duration.pb.go | 114 + .../protobuf/ptypes/duration/duration.proto | 98 + .../golang/protobuf/ptypes/duration_test.go | 121 + .../golang/protobuf/ptypes/regen.sh | 66 + .../golang/protobuf/ptypes/timestamp.go | 125 + .../protobuf/ptypes/timestamp/timestamp.pb.go | 127 + .../protobuf/ptypes/timestamp/timestamp.proto | 111 + .../golang/protobuf/ptypes/timestamp_test.go | 138 + vendor/github.com/google/btree/.travis.yml | 1 + .../analysis => google/btree}/LICENSE | 0 vendor/github.com/google/btree/README.md | 12 + vendor/github.com/google/btree/btree.go | 649 + vendor/github.com/google/btree/btree_mem.go | 76 + vendor/github.com/google/btree/btree_test.go | 309 + .../github.com/googleapis/gnostic/.gitignore | 13 + .../googleapis/gnostic/.travis-install.sh | 36 + .../github.com/googleapis/gnostic/.travis.yml | 43 + .../googleapis/gnostic/COMPILE-PROTOS.sh | 31 + .../googleapis/gnostic/CONTRIBUTING.md | 35 + vendor/github.com/googleapis/gnostic/LICENSE | 203 + vendor/github.com/googleapis/gnostic/Makefile | 15 + .../googleapis/gnostic/OpenAPIv2/OpenAPIv2.go | 8728 ++ .../gnostic/OpenAPIv2/OpenAPIv2.pb.go | 4456 + .../gnostic/OpenAPIv2/OpenAPIv2.proto | 663 + .../googleapis/gnostic/OpenAPIv2/README.md | 16 + .../gnostic/OpenAPIv2/openapi-2.0.json | 1610 + .../github.com/googleapis/gnostic/README.md | 103 + .../googleapis/gnostic/compiler/README.md | 3 + .../googleapis/gnostic/compiler/context.go | 43 + .../googleapis/gnostic/compiler/error.go | 61 + .../gnostic/compiler/extension-handler.go | 101 + .../googleapis/gnostic/compiler/helpers.go | 197 + .../googleapis/gnostic/compiler/main.go | 16 + .../googleapis/gnostic/compiler/reader.go | 167 + .../gnostic/extensions/COMPILE-EXTENSION.sh | 5 + .../googleapis/gnostic/extensions/README.md | 5 + .../gnostic/extensions/extension.pb.go | 219 + .../gnostic/extensions/extension.proto | 93 + .../gnostic/extensions/extensions.go | 82 + .../github.com/googleapis/gnostic/gnostic.go | 556 + .../googleapis/gnostic/gnostic_test.go | 453 + .../gregjones/httpcache/.travis.yml | 18 + .../gregjones/httpcache/LICENSE.txt | 7 + .../github.com/gregjones/httpcache/README.md | 24 + .../httpcache/diskcache/diskcache.go | 61 + .../httpcache/diskcache/diskcache_test.go | 42 + .../gregjones/httpcache/httpcache.go | 553 + .../gregjones/httpcache/httpcache_test.go | 1360 + .../github.com/json-iterator/go/.codecov.yml | 3 + vendor/github.com/json-iterator/go/.gitignore | 3 + .../github.com/json-iterator/go/.travis.yml | 13 + .../jsonpath => json-iterator/go}/LICENSE | 4 +- vendor/github.com/json-iterator/go/README.md | 80 + .../json-iterator/go/example_test.go | 95 + .../json-iterator/go/feature_adapter.go | 127 + .../json-iterator/go/feature_any.go | 242 + .../json-iterator/go/feature_any_array.go | 278 + .../json-iterator/go/feature_any_bool.go | 137 + .../json-iterator/go/feature_any_float.go | 83 + .../json-iterator/go/feature_any_int32.go | 74 + .../json-iterator/go/feature_any_int64.go | 74 + .../json-iterator/go/feature_any_invalid.go | 82 + .../json-iterator/go/feature_any_nil.go | 69 + .../json-iterator/go/feature_any_number.go | 104 + .../json-iterator/go/feature_any_object.go | 374 + .../json-iterator/go/feature_any_string.go | 166 + 
.../json-iterator/go/feature_any_uint32.go | 74 + .../json-iterator/go/feature_any_uint64.go | 74 + .../json-iterator/go/feature_config.go | 312 + .../json-iterator/go/feature_iter.go | 307 + .../json-iterator/go/feature_iter_array.go | 58 + .../json-iterator/go/feature_iter_float.go | 341 + .../json-iterator/go/feature_iter_int.go | 258 + .../json-iterator/go/feature_iter_object.go | 212 + .../json-iterator/go/feature_iter_skip.go | 127 + .../go/feature_iter_skip_sloppy.go | 144 + .../go/feature_iter_skip_strict.go | 89 + .../json-iterator/go/feature_iter_string.go | 215 + .../json-iterator/go/feature_json_number.go | 15 + .../json-iterator/go/feature_pool.go | 57 + .../json-iterator/go/feature_reflect.go | 691 + .../json-iterator/go/feature_reflect_array.go | 99 + .../go/feature_reflect_extension.go | 413 + .../json-iterator/go/feature_reflect_map.go | 244 + .../go/feature_reflect_native.go | 672 + .../go/feature_reflect_object.go | 196 + .../json-iterator/go/feature_reflect_slice.go | 149 + .../go/feature_reflect_struct_decoder.go | 916 + .../json-iterator/go/feature_stream.go | 305 + .../json-iterator/go/feature_stream_float.go | 96 + .../json-iterator/go/feature_stream_int.go | 320 + .../json-iterator/go/feature_stream_string.go | 396 + .../go/fuzzy_mode_convert_table.md | 7 + .../github.com/json-iterator/go/jsoniter.go | 18 + .../go/jsoniter_1dot8_only_test.go | 45 + .../json-iterator/go/jsoniter_adapter_test.go | 84 + .../json-iterator/go/jsoniter_alias_test.go | 62 + .../go/jsoniter_any_array_test.go | 122 + .../go/jsoniter_any_bool_test.go | 64 + .../go/jsoniter_any_float_test.go | 102 + .../json-iterator/go/jsoniter_any_int_test.go | 197 + .../json-iterator/go/jsoniter_any_map_test.go | 14 + .../go/jsoniter_any_null_test.go | 15 + .../go/jsoniter_any_object_test.go | 107 + .../go/jsoniter_any_string_test.go | 56 + .../json-iterator/go/jsoniter_array_test.go | 213 + .../json-iterator/go/jsoniter_bool_test.go | 84 + .../go/jsoniter_customize_test.go | 316 + .../json-iterator/go/jsoniter_demo_test.go | 87 + .../go/jsoniter_encode_interface_test.go | 42 + .../go/jsoniter_fixed_array_test.go | 37 + .../json-iterator/go/jsoniter_float_test.go | 210 + .../json-iterator/go/jsoniter_int_test.go | 499 + .../go/jsoniter_interface_test.go | 299 + .../json-iterator/go/jsoniter_invalid_test.go | 132 + .../json-iterator/go/jsoniter_io_test.go | 65 + .../go/jsoniter_iterator_test.go | 66 + .../go/jsoniter_large_file_test.go | 157 + .../json-iterator/go/jsoniter_map_test.go | 160 + .../go/jsoniter_must_be_valid_test.go | 71 + .../json-iterator/go/jsoniter_nested_test.go | 88 + .../json-iterator/go/jsoniter_null_test.go | 137 + .../json-iterator/go/jsoniter_object_test.go | 330 + .../go/jsoniter_optional_test.go | 46 + .../go/jsoniter_raw_message_test.go | 74 + .../json-iterator/go/jsoniter_reader_test.go | 57 + .../go/jsoniter_reflect_native_test.go | 154 + .../json-iterator/go/jsoniter_skip_test.go | 179 + .../json-iterator/go/jsoniter_sloppy_test.go | 162 + .../json-iterator/go/jsoniter_stream_test.go | 53 + .../json-iterator/go/jsoniter_string_test.go | 261 + .../go/jsoniter_struct_decoder_test.go | 267 + .../json-iterator/go/jsoniter_wrap_test.go | 118 + vendor/github.com/json-iterator/go/test.sh | 12 + .../json-iterator/go/unmarshal_input_test.go | 72 + .../opencontainers/go-digest/.pullapprove.yml | 12 + .../opencontainers/go-digest/.travis.yml | 4 + .../go-digest}/CONTRIBUTING.md | 23 +- .../go-digest/LICENSE.code} | 2 +- .../opencontainers/go-digest/LICENSE.docs | 425 + 
.../opencontainers/go-digest/MAINTAINERS | 7 + .../opencontainers/go-digest/README.md | 104 + .../go-digest/algorithm.go} | 47 +- .../go-digest/algorithm_test.go | 100 + .../go-digest}/digest.go | 55 +- .../go-digest}/digest_test.go | 14 +- .../opencontainers/go-digest/digester.go | 25 + .../go-digest}/doc.go | 0 .../go-digest}/verifiers.go | 13 - .../go-digest/verifiers_test.go | 66 + .../{ugorji/go => peterbourgon/diskv}/LICENSE | 13 +- .../github.com/peterbourgon/diskv/README.md | 141 + .../peterbourgon/diskv/basic_test.go | 336 + .../peterbourgon/diskv/compression.go | 64 + .../peterbourgon/diskv/compression_test.go | 72 + vendor/github.com/peterbourgon/diskv/diskv.go | 624 + .../peterbourgon/diskv/import_test.go | 76 + vendor/github.com/peterbourgon/diskv/index.go | 115 + .../peterbourgon/diskv/index_test.go | 148 + .../peterbourgon/diskv/issues_test.go | 121 + .../peterbourgon/diskv/keys_test.go | 231 + .../peterbourgon/diskv/speed_test.go | 153 + .../peterbourgon/diskv/stream_test.go | 117 + .../prometheus/client_golang/.travis.yml | 6 +- .../prometheus/client_golang/AUTHORS.md | 18 - .../prometheus/client_golang/CHANGELOG.md | 23 + .../prometheus/client_golang/CONTRIBUTING.md | 6 +- .../prometheus/client_golang/MAINTAINERS.md | 1 + .../prometheus/client_golang/NOTICE | 5 - .../prometheus/client_golang/README.md | 4 +- .../prometheus/client_golang/VERSION | 2 +- .../client_golang/prometheus/README.md | 54 +- .../prometheus/benchmark_test.go | 34 +- .../client_golang/prometheus/collector.go | 52 +- .../client_golang/prometheus/counter.go | 37 +- .../client_golang/prometheus/desc.go | 55 +- .../client_golang/prometheus/doc.go | 197 +- .../prometheus/example_clustermanager_test.go | 96 +- .../prometheus/example_memstats_test.go | 87 - .../prometheus/example_selfcollector_test.go | 69 - .../prometheus/example_timer_complex_test.go | 71 + .../prometheus/example_timer_gauge_test.go | 48 + .../prometheus/example_timer_test.go | 40 + .../client_golang/prometheus/examples_test.go | 317 +- .../{expvar.go => expvar_collector.go} | 32 +- ...xpvar_test.go => expvar_collector_test.go} | 2 +- .../client_golang/prometheus/fnv.go | 29 + .../client_golang/prometheus/gauge.go | 28 +- .../client_golang/prometheus/gauge_test.go | 20 + .../client_golang/prometheus/go_collector.go | 47 +- .../prometheus/go_collector_test.go | 51 +- .../client_golang/prometheus/histogram.go | 50 +- .../prometheus/histogram_test.go | 26 +- .../client_golang/prometheus/http.go | 243 +- .../client_golang/prometheus/http_test.go | 43 +- .../client_golang/prometheus/metric.go | 34 +- .../client_golang/prometheus/observer.go | 50 + .../prometheus/process_collector.go | 108 +- .../prometheus/process_collector_test.go | 60 +- .../prometheus/promhttp/delegator_1_7.go | 38 + .../prometheus/promhttp/delegator_1_8.go | 73 + .../client_golang/prometheus/promhttp/http.go | 204 + .../prometheus/promhttp/http_test.go | 131 + .../prometheus/promhttp/instrument_client.go | 95 + .../promhttp/instrument_client_1_8.go | 142 + .../promhttp/instrument_client_1_8_test.go | 195 + .../prometheus/promhttp/instrument_server.go | 505 + .../promhttp/instrument_server_test.go | 193 + .../client_golang/prometheus/push.go | 65 - .../client_golang/prometheus/registry.go | 931 +- .../client_golang/prometheus/registry_test.go | 139 +- .../client_golang/prometheus/summary.go | 73 +- .../client_golang/prometheus/summary_test.go | 57 +- .../client_golang/prometheus/timer.go | 48 + .../client_golang/prometheus/timer_test.go | 152 + 
.../client_golang/prometheus/untyped.go | 20 +- .../client_golang/prometheus/value.go | 15 +- .../client_golang/prometheus/vec.go | 253 +- .../client_golang/prometheus/vec_test.go | 255 +- .../stretchr/testify/assert/assertions.go | 12 +- .../testify/assert/assertions_test.go | 2 - vendor/github.com/ugorji/go/README.md | 20 - vendor/github.com/ugorji/go/codec/0doc.go | 199 - vendor/github.com/ugorji/go/codec/README.md | 148 - vendor/github.com/ugorji/go/codec/binc.go | 929 - vendor/github.com/ugorji/go/codec/cbor.go | 592 - .../github.com/ugorji/go/codec/cbor_test.go | 205 - .../github.com/ugorji/go/codec/codec_test.go | 1506 - .../ugorji/go/codec/codecgen_test.go | 24 - vendor/github.com/ugorji/go/codec/decode.go | 2053 - .../github.com/ugorji/go/codec/decode_go.go | 16 - .../github.com/ugorji/go/codec/decode_go14.go | 14 - vendor/github.com/ugorji/go/codec/encode.go | 1461 - .../ugorji/go/codec/fast-path.generated.go | 39352 ------- .../ugorji/go/codec/fast-path.go.tmpl | 527 - .../ugorji/go/codec/fast-path.not.go | 34 - .../ugorji/go/codec/gen-dec-array.go.tmpl | 104 - .../ugorji/go/codec/gen-dec-map.go.tmpl | 58 - .../ugorji/go/codec/gen-helper.generated.go | 243 - .../ugorji/go/codec/gen-helper.go.tmpl | 372 - .../ugorji/go/codec/gen.generated.go | 175 - vendor/github.com/ugorji/go/codec/gen.go | 2014 - vendor/github.com/ugorji/go/codec/gen_15.go | 12 - vendor/github.com/ugorji/go/codec/gen_16.go | 12 - vendor/github.com/ugorji/go/codec/gen_17.go | 10 - vendor/github.com/ugorji/go/codec/helper.go | 1314 - .../ugorji/go/codec/helper_internal.go | 242 - .../ugorji/go/codec/helper_not_unsafe.go | 20 - .../github.com/ugorji/go/codec/helper_test.go | 258 - .../ugorji/go/codec/helper_unsafe.go | 49 - vendor/github.com/ugorji/go/codec/json.go | 1234 - vendor/github.com/ugorji/go/codec/msgpack.go | 852 - vendor/github.com/ugorji/go/codec/noop.go | 213 - vendor/github.com/ugorji/go/codec/prebuild.go | 3 - vendor/github.com/ugorji/go/codec/prebuild.sh | 199 - vendor/github.com/ugorji/go/codec/py_test.go | 30 - vendor/github.com/ugorji/go/codec/rpc.go | 180 - vendor/github.com/ugorji/go/codec/simple.go | 526 - .../ugorji/go/codec/test-cbor-goldens.json | 639 - vendor/github.com/ugorji/go/codec/test.py | 126 - vendor/github.com/ugorji/go/codec/tests.sh | 102 - vendor/github.com/ugorji/go/codec/time.go | 233 - .../github.com/ugorji/go/codec/values_test.go | 203 - vendor/github.com/ugorji/go/msgpack.org.md | 47 - vendor/golang.org/x/crypto/AUTHORS | 2 +- vendor/golang.org/x/crypto/CONTRIBUTORS | 2 +- vendor/golang.org/x/crypto/README | 3 - vendor/golang.org/x/crypto/README.md | 21 + .../x/crypto/curve25519/const_amd64.h | 8 + .../x/crypto/curve25519/const_amd64.s | 6 +- .../x/crypto/curve25519/cswap_amd64.s | 131 +- .../x/crypto/curve25519/curve25519.go | 23 +- .../x/crypto/curve25519/curve25519_test.go | 10 + vendor/golang.org/x/crypto/curve25519/doc.go | 2 +- .../x/crypto/curve25519/freeze_amd64.s | 31 +- .../x/crypto/curve25519/ladderstep_amd64.s | 687 +- .../x/crypto/curve25519/mul_amd64.s | 46 +- .../x/crypto/curve25519/square_amd64.s | 31 +- vendor/golang.org/x/crypto/ed25519/ed25519.go | 6 +- .../x/crypto/ed25519/ed25519_test.go | 2 +- vendor/golang.org/x/crypto/ssh/certs.go | 36 +- vendor/golang.org/x/crypto/ssh/certs_test.go | 30 +- vendor/golang.org/x/crypto/ssh/channel.go | 4 +- vendor/golang.org/x/crypto/ssh/cipher.go | 66 +- vendor/golang.org/x/crypto/ssh/cipher_test.go | 68 +- vendor/golang.org/x/crypto/ssh/client.go | 62 +- vendor/golang.org/x/crypto/ssh/client_auth.go | 49 +- 
.../x/crypto/ssh/client_auth_test.go | 198 +- vendor/golang.org/x/crypto/ssh/client_test.go | 42 + vendor/golang.org/x/crypto/ssh/common.go | 31 +- vendor/golang.org/x/crypto/ssh/connection.go | 2 +- vendor/golang.org/x/crypto/ssh/doc.go | 3 + .../golang.org/x/crypto/ssh/example_test.go | 68 +- vendor/golang.org/x/crypto/ssh/handshake.go | 450 +- .../golang.org/x/crypto/ssh/handshake_test.go | 447 +- vendor/golang.org/x/crypto/ssh/kex.go | 38 +- vendor/golang.org/x/crypto/ssh/keys.go | 170 +- vendor/golang.org/x/crypto/ssh/keys_test.go | 44 + vendor/golang.org/x/crypto/ssh/mac.go | 10 +- vendor/golang.org/x/crypto/ssh/mux.go | 4 +- vendor/golang.org/x/crypto/ssh/mux_test.go | 3 + vendor/golang.org/x/crypto/ssh/server.go | 141 +- vendor/golang.org/x/crypto/ssh/session.go | 20 + .../golang.org/x/crypto/ssh/session_test.go | 10 +- vendor/golang.org/x/crypto/ssh/streamlocal.go | 115 + vendor/golang.org/x/crypto/ssh/tcpip.go | 198 +- .../x/crypto/ssh/terminal/terminal.go | 73 +- .../x/crypto/ssh/terminal/terminal_test.go | 59 + .../golang.org/x/crypto/ssh/terminal/util.go | 42 +- .../x/crypto/ssh/terminal/util_bsd.go | 6 +- .../x/crypto/ssh/terminal/util_linux.go | 9 +- .../x/crypto/ssh/terminal/util_solaris.go | 63 +- .../x/crypto/ssh/terminal/util_windows.go | 132 +- vendor/golang.org/x/crypto/ssh/transport.go | 50 +- vendor/golang.org/x/net/context/context.go | 112 +- .../x/net/context/ctxhttp/ctxhttp_17_test.go | 1 + vendor/golang.org/x/net/context/go17.go | 4 +- vendor/golang.org/x/net/context/go19.go | 20 + vendor/golang.org/x/net/context/pre_go17.go | 18 +- vendor/golang.org/x/net/context/pre_go19.go | 109 + .../x/net/context/withtimeout_test.go | 9 +- vendor/golang.org/x/net/http2/ciphers.go | 641 + vendor/golang.org/x/net/http2/ciphers_test.go | 309 + .../x/net/http2/client_conn_pool.go | 2 +- .../x/net/http2/configure_transport.go | 2 +- vendor/golang.org/x/net/http2/databuffer.go | 146 + .../golang.org/x/net/http2/databuffer_test.go | 157 + vendor/golang.org/x/net/http2/errors.go | 13 +- vendor/golang.org/x/net/http2/fixed_buffer.go | 60 - .../x/net/http2/fixed_buffer_test.go | 128 - vendor/golang.org/x/net/http2/frame.go | 81 +- vendor/golang.org/x/net/http2/frame_test.go | 91 +- vendor/golang.org/x/net/http2/go16.go | 27 - vendor/golang.org/x/net/http2/go18.go | 8 +- vendor/golang.org/x/net/http2/go18_test.go | 13 + vendor/golang.org/x/net/http2/go19.go | 16 + vendor/golang.org/x/net/http2/go19_test.go | 60 + vendor/golang.org/x/net/http2/hpack/encode.go | 29 +- .../x/net/http2/hpack/encode_test.go | 70 +- vendor/golang.org/x/net/http2/hpack/hpack.go | 104 +- .../x/net/http2/hpack/hpack_test.go | 150 +- vendor/golang.org/x/net/http2/hpack/tables.go | 255 +- .../x/net/http2/hpack/tables_test.go | 214 + vendor/golang.org/x/net/http2/http2.go | 8 +- vendor/golang.org/x/net/http2/not_go16.go | 25 - vendor/golang.org/x/net/http2/not_go18.go | 2 + vendor/golang.org/x/net/http2/not_go19.go | 16 + vendor/golang.org/x/net/http2/pipe.go | 18 +- vendor/golang.org/x/net/http2/pipe_test.go | 21 + vendor/golang.org/x/net/http2/server.go | 442 +- .../x/net/http2/server_push_test.go | 2 +- vendor/golang.org/x/net/http2/server_test.go | 233 +- vendor/golang.org/x/net/http2/transport.go | 256 +- .../golang.org/x/net/http2/transport_test.go | 442 +- .../x/net/http2/writesched_priority.go | 2 +- vendor/golang.org/x/net/idna/example_test.go | 70 + vendor/golang.org/x/net/idna/idna.go | 682 +- vendor/golang.org/x/net/idna/punycode.go | 23 +- vendor/golang.org/x/net/idna/tables.go | 4477 + 
vendor/golang.org/x/net/idna/trie.go | 72 + vendor/golang.org/x/net/idna/trieval.go | 114 + .../x/sys/windows/asm_windows_386.s | 13 + .../x/sys/windows/asm_windows_amd64.s | 13 + .../golang.org/x/sys/windows/dll_windows.go | 378 + vendor/golang.org/x/sys/windows/env_unset.go | 15 + .../golang.org/x/sys/windows/env_windows.go | 25 + vendor/golang.org/x/sys/windows/eventlog.go | 20 + .../golang.org/x/sys/windows/exec_windows.go | 97 + vendor/golang.org/x/sys/windows/mksyscall.go | 7 + vendor/golang.org/x/sys/windows/race.go | 30 + vendor/golang.org/x/sys/windows/race0.go | 25 + .../x/sys/windows/security_windows.go | 435 + vendor/golang.org/x/sys/windows/service.go | 143 + vendor/golang.org/x/sys/windows/str.go | 22 + vendor/golang.org/x/sys/windows/syscall.go | 71 + .../golang.org/x/sys/windows/syscall_test.go | 33 + .../x/sys/windows/syscall_windows.go | 995 + .../x/sys/windows/syscall_windows_test.go | 107 + .../x/sys/windows/zsyscall_windows.go | 2289 + .../x/sys/windows/ztypes_windows.go | 1242 + .../x/sys/windows/ztypes_windows_386.go | 22 + .../x/sys/windows/ztypes_windows_amd64.go | 22 + vendor/golang.org/x/text/.gitignore | 4 + vendor/golang.org/x/text/README | 20 + vendor/golang.org/x/text/cases/cases.go | 53 +- vendor/golang.org/x/text/cases/context.go | 101 +- .../golang.org/x/text/cases/context_test.go | 37 +- vendor/golang.org/x/text/cases/fold.go | 10 +- vendor/golang.org/x/text/cases/fold_test.go | 20 +- vendor/golang.org/x/text/cases/gen.go | 24 +- vendor/golang.org/x/text/cases/gen_trieval.go | 12 +- vendor/golang.org/x/text/cases/icu.go | 61 + vendor/golang.org/x/text/cases/icu_test.go | 210 + vendor/golang.org/x/text/cases/info.go | 15 +- vendor/golang.org/x/text/cases/map.go | 381 +- vendor/golang.org/x/text/cases/map_test.go | 688 +- vendor/golang.org/x/text/cases/tables.go | 390 +- vendor/golang.org/x/text/cases/tables_test.go | 3 +- vendor/golang.org/x/text/cases/trieval.go | 14 +- vendor/golang.org/x/text/gen.go | 147 +- vendor/golang.org/x/text/internal/gen.go | 52 + vendor/golang.org/x/text/internal/gen_test.go | 38 + vendor/golang.org/x/text/internal/internal.go | 51 + .../x/text/internal/internal_test.go | 38 + vendor/golang.org/x/text/internal/match.go | 67 + .../golang.org/x/text/internal/match_test.go | 56 + vendor/golang.org/x/text/internal/tables.go | 117 + vendor/golang.org/x/text/language/common.go | 2 +- .../golang.org/x/text/language/data_test.go | 59 +- .../x/text/language/{maketables.go => gen.go} | 208 +- vendor/golang.org/x/text/language/index.go | 1505 +- vendor/golang.org/x/text/language/language.go | 15 +- .../x/text/language/language_test.go | 8 +- vendor/golang.org/x/text/language/match.go | 177 +- .../golang.org/x/text/language/match_test.go | 190 +- vendor/golang.org/x/text/language/tables.go | 5197 +- vendor/golang.org/x/text/runes/cond.go | 77 +- vendor/golang.org/x/text/runes/cond_test.go | 81 +- vendor/golang.org/x/text/runes/runes.go | 139 +- vendor/golang.org/x/text/runes/runes_test.go | 173 +- .../bidirule/{go1_7_test.go => bench_test.go} | 18 +- .../x/text/secure/bidirule/bidirule.go | 110 +- .../x/text/secure/bidirule/bidirule_test.go | 318 +- .../x/text/secure/bidirule/go1_6_test.go | 24 - .../x/text/secure/precis/benchmark_test.go | 8 +- .../x/text/secure/precis/enforce_test.go | 163 +- .../x/text/secure/precis/go1_6_test.go | 24 - .../x/text/secure/precis/go1_7_test.go | 23 - .../x/text/secure/precis/options.go | 59 +- .../x/text/secure/precis/profile.go | 214 +- .../x/text/secure/precis/profile_test.go | 149 + 
.../x/text/secure/precis/profiles.go | 96 +- .../golang.org/x/text/secure/precis/tables.go | 2 +- .../x/text/secure/precis/tables_test.go | 2 +- .../x/text/secure/precis/trieval.go | 2 +- .../golang.org/x/text/transform/transform.go | 52 +- .../golang.org/x/text/unicode/bidi/bracket.go | 42 +- vendor/golang.org/x/text/unicode/bidi/core.go | 5 +- .../x/text/unicode/bidi/core_test.go | 4 +- .../x/text/unicode/bidi/ranges_test.go | 2 +- .../golang.org/x/text/unicode/bidi/tables.go | 2 +- .../golang.org/x/text/unicode/bidi/trieval.go | 2 +- .../x/text/unicode/norm/composition.go | 18 +- .../x/text/unicode/norm/example_test.go | 2 +- .../x/text/unicode/norm/forminfo.go | 49 +- .../golang.org/x/text/unicode/norm/input.go | 8 +- vendor/golang.org/x/text/unicode/norm/iter.go | 31 +- .../x/text/unicode/norm/iter_test.go | 2 +- .../x/text/unicode/norm/maketables.go | 12 +- .../x/text/unicode/norm/normalize.go | 55 +- .../x/text/unicode/norm/normalize_test.go | 278 +- .../x/text/unicode/norm/readwriter.go | 1 - .../golang.org/x/text/unicode/norm/tables.go | 1020 +- .../x/text/unicode/norm/transform_test.go | 4 +- vendor/golang.org/x/text/width/common_test.go | 2 +- vendor/golang.org/x/text/width/kind_string.go | 2 +- vendor/golang.org/x/text/width/runes_test.go | 2 +- vendor/golang.org/x/text/width/tables.go | 2 +- vendor/golang.org/x/text/width/transform.go | 77 + .../golang.org/x/text/width/transform_test.go | 914 +- vendor/golang.org/x/text/width/trieval.go | 2 +- vendor/golang.org/x/text/width/width.go | 9 +- vendor/google.golang.org/api/.travis.yml | 7 +- vendor/google.golang.org/api/README.md | 17 + vendor/google.golang.org/api/api-list.json | 248 +- .../api/cloudkms/v1/cloudkms-api.json | 1558 + .../api/cloudkms/v1/cloudkms-gen.go | 5157 + .../v2beta2/cloudmonitoring-api.json | 839 + .../v2beta2/cloudmonitoring-gen.go | 2247 + .../api/compute/v0.alpha/compute-api.json | 31300 +++++ .../api/compute/v0.alpha/compute-gen.go | 98380 ++++++++++++++++ .../api/compute/v0.beta/compute-api.json | 25049 ++++ .../api/compute/v0.beta/compute-gen.go | 78890 +++++++++++++ .../api/compute/v1/compute-api.json | 2788 +- .../api/compute/v1/compute-gen.go | 36638 +++--- .../api/container/v1/container-api.json | 3057 +- .../api/container/v1/container-gen.go | 2898 +- .../google.golang.org/api/dns/v1/dns-api.json | 708 + .../google.golang.org/api/dns/v1/dns-gen.go | 2028 + .../google.golang.org/api/gensupport/json.go | 49 +- .../api/gensupport/json_test.go | 27 + .../google.golang.org/api/gensupport/send.go | 6 + .../api/gensupport/send_test.go | 20 + vendor/google.golang.org/api/key.json.enc | Bin 1248 -> 2432 bytes .../api/logging/v2beta1/logging-api.json | 1531 + .../api/logging/v2beta1/logging-gen.go | 4427 + .../api/monitoring/v3/monitoring-api.json | 1629 + .../api/monitoring/v3/monitoring-gen.go | 4420 + .../api/pubsub/v1/pubsub-api.json | 1159 + .../api/pubsub/v1/pubsub-gen.go | 4457 + .../google.golang.org/appengine/.travis.yml | 19 +- .../appengine/CONTRIBUTING.md | 90 + vendor/google.golang.org/appengine/README.md | 46 +- .../google.golang.org/appengine/appengine.go | 3 +- .../appengine/internal/api.go | 49 +- .../appengine/internal/api_classic.go | 12 +- .../appengine/internal/api_common.go | 39 +- .../appengine/internal/api_test.go | 49 +- .../appengine/internal/identity_classic.go | 48 +- .../appengine/internal/identity_vm.go | 6 +- .../appengine/internal/main_vm.go | 6 +- vendor/gopkg.in/gcfg.v1/doc.go | 27 + vendor/gopkg.in/gcfg.v1/errors.go | 57 + vendor/gopkg.in/gcfg.v1/issues_test.go | 49 +- 
vendor/gopkg.in/gcfg.v1/read.go | 94 +- vendor/gopkg.in/gcfg.v1/read_test.go | 44 +- vendor/gopkg.in/gcfg.v1/set.go | 55 +- .../warnings.v0}/LICENSE | 5 +- vendor/gopkg.in/warnings.v0/README | 74 + vendor/gopkg.in/warnings.v0/warnings.go | 191 + vendor/gopkg.in/warnings.v0/warnings_test.go | 82 + vendor/k8s.io/api/OWNERS | 51 + .../api/admissionregistration/v1alpha1/BUILD | 25 +- .../v1alpha1/generated.pb.go | 147 +- .../v1alpha1/generated.proto | 7 - .../v1alpha1/types.generated.go | 4232 - .../admissionregistration/v1alpha1/types.go | 21 +- .../v1alpha1/types_swagger_doc_generated.go | 7 +- .../v1alpha1/zz_generated.deepcopy.go | 443 +- vendor/k8s.io/api/apps/v1beta1/BUILD | 26 +- .../k8s.io/api/apps/v1beta1/generated.pb.go | 263 +- .../k8s.io/api/apps/v1beta1/generated.proto | 28 +- .../api/apps/v1beta1/types.generated.go | 8414 -- vendor/k8s.io/api/apps/v1beta1/types.go | 52 +- .../v1beta1/types_swagger_doc_generated.go | 16 +- .../api/apps/v1beta1/zz_generated.deepcopy.go | 864 +- .../apis/policy => api/apps/v1beta2}/BUILD | 22 +- .../v2alpha1 => apps/v1beta2}/doc.go | 2 +- .../k8s.io/api/apps/v1beta2/generated.pb.go | 6934 ++ .../k8s.io/api/apps/v1beta2/generated.proto | 691 + .../apps => api/apps/v1beta2}/register.go | 37 +- vendor/k8s.io/api/apps/v1beta2/types.go | 823 + .../v1beta2/types_swagger_doc_generated.go | 368 + .../api/apps/v1beta2/zz_generated.deepcopy.go | 1017 + vendor/k8s.io/api/authentication/v1/BUILD | 22 +- vendor/k8s.io/api/authentication/v1/types.go | 7 +- .../v1/zz_generated.deepcopy.go | 147 +- .../k8s.io/api/authentication/v1beta1/BUILD | 25 +- .../authentication/v1beta1/types.generated.go | 1568 - .../api/authentication/v1beta1/types.go | 7 +- .../v1beta1/zz_generated.deepcopy.go | 147 +- vendor/k8s.io/api/authorization/v1/BUILD | 25 +- .../api/authorization/v1/generated.pb.go | 1408 +- .../api/authorization/v1/generated.proto | 84 +- .../k8s.io/api/authorization/v1/register.go | 1 + .../api/authorization/v1/types.generated.go | 3233 - vendor/k8s.io/api/authorization/v1/types.go | 103 +- .../v1/types_swagger_doc_generated.go | 53 + .../authorization/v1/zz_generated.deepcopy.go | 466 +- vendor/k8s.io/api/authorization/v1beta1/BUILD | 25 +- .../api/authorization/v1beta1/generated.pb.go | 1408 +- .../api/authorization/v1beta1/generated.proto | 84 +- .../api/authorization/v1beta1/register.go | 1 + .../authorization/v1beta1/types.generated.go | 3233 - .../k8s.io/api/authorization/v1beta1/types.go | 103 +- .../v1beta1/types_swagger_doc_generated.go | 53 + .../v1beta1/zz_generated.deepcopy.go | 466 +- vendor/k8s.io/api/autoscaling/v1/BUILD | 25 +- .../api/autoscaling/v1/types.generated.go | 5633 - vendor/k8s.io/api/autoscaling/v1/types.go | 7 +- .../autoscaling/v1/zz_generated.deepcopy.go | 613 +- vendor/k8s.io/api/autoscaling/v2alpha1/BUILD | 33 - .../autoscaling/v2alpha1/types.generated.go | 5218 - .../v2alpha1/zz_generated.deepcopy.go | 319 - .../autoscaling/v2beta1}/BUILD | 20 +- .../autoscaling/v2beta1}/doc.go | 3 +- .../{v2alpha1 => v2beta1}/generated.pb.go | 202 +- .../{v2alpha1 => v2beta1}/generated.proto | 5 +- .../{v2alpha1 => v2beta1}/register.go | 4 +- .../{v2alpha1 => v2beta1}/types.go | 7 +- .../types_swagger_doc_generated.go | 2 +- .../v2beta1/zz_generated.deepcopy.go | 491 + vendor/k8s.io/api/batch/v1/BUILD | 27 +- vendor/k8s.io/api/batch/v1/generated.pb.go | 142 +- vendor/k8s.io/api/batch/v1/generated.proto | 7 +- vendor/k8s.io/api/batch/v1/types.generated.go | 2681 - vendor/k8s.io/api/batch/v1/types.go | 17 +- 
.../batch/v1/types_swagger_doc_generated.go | 3 +- .../api/batch/v1/zz_generated.deepcopy.go | 265 +- .../authorization => api/batch/v1beta1}/BUILD | 19 +- .../batch/v1beta1/doc.go} | 5 +- .../k8s.io/api/batch/v1beta1/generated.pb.go | 1509 + .../k8s.io/api/batch/v1beta1/generated.proto | 135 + .../batch => api/batch/v1beta1}/register.go | 24 +- vendor/k8s.io/api/batch/v1beta1/types.go | 155 + .../v1beta1/types_swagger_doc_generated.go | 96 + .../batch/v1beta1/zz_generated.deepcopy.go | 258 + vendor/k8s.io/api/batch/v2alpha1/BUILD | 27 +- vendor/k8s.io/api/batch/v2alpha1/register.go | 2 - .../api/batch/v2alpha1/types.generated.go | 2525 - vendor/k8s.io/api/batch/v2alpha1/types.go | 7 +- .../batch/v2alpha1/zz_generated.deepcopy.go | 282 +- vendor/k8s.io/api/certificates/v1beta1/BUILD | 25 +- .../api/certificates/v1beta1/generated.pb.go | 105 +- .../api/certificates/v1beta1/generated.proto | 2 +- .../certificates/v1beta1/types.generated.go | 2624 - .../k8s.io/api/certificates/v1beta1/types.go | 9 +- .../v1beta1/zz_generated.deepcopy.go | 240 +- vendor/k8s.io/api/core/v1/BUILD | 25 +- .../api/core/v1/annotation_key_constants.go | 18 +- vendor/k8s.io/api/core/v1/generated.pb.go | 4863 +- vendor/k8s.io/api/core/v1/generated.proto | 246 +- vendor/k8s.io/api/core/v1/register.go | 1 + vendor/k8s.io/api/core/v1/resource.go | 4 +- vendor/k8s.io/api/core/v1/types.generated.go | 76734 ------------ vendor/k8s.io/api/core/v1/types.go | 454 +- .../core/v1/types_swagger_doc_generated.go | 138 +- .../api/core/v1/zz_generated.deepcopy.go | 7828 +- vendor/k8s.io/api/extensions/v1beta1/BUILD | 25 +- .../api/extensions/v1beta1/generated.pb.go | 1444 +- .../api/extensions/v1beta1/generated.proto | 116 +- .../api/extensions/v1beta1/types.generated.go | 21979 ---- vendor/k8s.io/api/extensions/v1beta1/types.go | 180 +- .../v1beta1/types_swagger_doc_generated.go | 80 +- .../v1beta1/zz_generated.deepcopy.go | 2424 +- vendor/k8s.io/api/kubernetes-sha | 1 - vendor/k8s.io/api/networking/v1/BUILD | 25 +- .../k8s.io/api/networking/v1/generated.pb.go | 630 +- .../k8s.io/api/networking/v1/generated.proto | 63 + .../api/networking/v1/types.generated.go | 2322 - vendor/k8s.io/api/networking/v1/types.go | 78 +- .../v1/types_swagger_doc_generated.go | 23 + .../networking/v1/zz_generated.deepcopy.go | 353 +- vendor/k8s.io/api/policy/v1beta1/BUILD | 25 +- .../api/policy/v1beta1/types.generated.go | 2305 - vendor/k8s.io/api/policy/v1beta1/types.go | 10 +- .../policy/v1beta1/zz_generated.deepcopy.go | 238 +- .../apis/authentication => api/rbac/v1}/BUILD | 19 +- .../pkg/apis/rbac => api/rbac/v1}/doc.go | 6 +- vendor/k8s.io/api/rbac/v1/generated.pb.go | 2555 + vendor/k8s.io/api/rbac/v1/generated.proto | 182 + .../pkg/apis/rbac => api/rbac/v1}/register.go | 20 +- .../pkg/apis/rbac => api/rbac/v1}/types.go | 133 +- .../rbac/v1/types_swagger_doc_generated.go | 148 + .../api/rbac/v1/zz_generated.deepcopy.go | 427 + vendor/k8s.io/api/rbac/v1alpha1/BUILD | 25 +- .../api/rbac/v1alpha1/types.generated.go | 4879 - vendor/k8s.io/api/rbac/v1alpha1/types.go | 24 +- .../rbac/v1alpha1/zz_generated.deepcopy.go | 520 +- vendor/k8s.io/api/rbac/v1beta1/BUILD | 25 +- .../api/rbac/v1beta1/types.generated.go | 4879 - vendor/k8s.io/api/rbac/v1beta1/types.go | 24 +- .../api/rbac/v1beta1/zz_generated.deepcopy.go | 520 +- .../scheduling/v1alpha1}/BUILD | 20 +- .../scheduling/v1alpha1}/doc.go | 7 +- .../api/scheduling/v1alpha1/generated.pb.go | 641 + .../api/scheduling/v1alpha1/generated.proto | 65 + .../scheduling/v1alpha1}/register.go | 32 +- 
.../k8s.io/api/scheduling/v1alpha1/types.go | 63 + .../v1alpha1/types_swagger_doc_generated.go | 52 + .../v1alpha1/zz_generated.deepcopy.go | 109 + vendor/k8s.io/api/settings/v1alpha1/BUILD | 22 +- vendor/k8s.io/api/settings/v1alpha1/types.go | 5 +- .../v1alpha1/zz_generated.deepcopy.go | 177 +- vendor/k8s.io/api/storage/v1/BUILD | 32 +- vendor/k8s.io/api/storage/v1/generated.pb.go | 197 +- vendor/k8s.io/api/storage/v1/generated.proto | 16 + vendor/k8s.io/api/storage/v1/types.go | 23 +- .../storage/v1/types_swagger_doc_generated.go | 11 +- .../api/storage/v1/zz_generated.deepcopy.go | 122 +- vendor/k8s.io/api/storage/v1beta1/BUILD | 35 +- .../api/storage/v1beta1/generated.pb.go | 198 +- .../api/storage/v1beta1/generated.proto | 16 + .../api/storage/v1beta1/types.generated.go | 985 - vendor/k8s.io/api/storage/v1beta1/types.go | 23 +- .../v1beta1/types_swagger_doc_generated.go | 11 +- .../storage/v1beta1/zz_generated.deepcopy.go | 122 +- vendor/k8s.io/apiextensions-apiserver/BUILD | 38 +- .../apiextensions-apiserver/kubernetes-sha | 1 - vendor/k8s.io/apiextensions-apiserver/main.go | 9 +- .../pkg/apis/apiextensions/BUILD | 25 +- .../pkg/apis/apiextensions/deepcopy.go | 279 + .../pkg/apis/apiextensions/types.go | 16 +- .../apis/apiextensions/types_jsonschema.go | 96 + .../pkg/apis/apiextensions/v1beta1/BUILD | 42 +- .../apis/apiextensions/v1beta1/conversion.go | 73 + .../apiextensions/v1beta1/conversion_test.go | 113 + .../apis/apiextensions/v1beta1/deepcopy.go | 257 + .../pkg/apis/apiextensions/v1beta1/doc.go | 3 +- .../apiextensions/v1beta1/generated.pb.go | 4377 +- .../apiextensions/v1beta1/generated.proto | 122 + .../pkg/apis/apiextensions/v1beta1/marshal.go | 134 + .../apiextensions/v1beta1/marshal_test.go | 150 + .../apis/apiextensions/v1beta1/register.go | 9 +- .../pkg/apis/apiextensions/v1beta1/types.go | 18 +- .../apiextensions/v1beta1/types_jsonschema.go | 98 + .../v1beta1/zz_generated.conversion.go | 621 +- .../v1beta1/zz_generated.deepcopy.go | 413 +- .../apiextensions/zz_generated.deepcopy.go | 390 +- .../pkg/client/clientset/clientset/BUILD | 21 +- .../client/clientset/clientset/clientset.go | 18 +- .../client/clientset/clientset/scheme/BUILD | 16 +- .../typed/apiextensions/v1beta1/BUILD | 19 +- .../v1beta1/customresourcedefinition.go | 66 +- .../pkg/features/BUILD | 25 + .../pkg/features/kube_features.go | 46 + vendor/k8s.io/apimachinery/OWNERS | 2 +- vendor/k8s.io/apimachinery/kubernetes-sha | 1 - .../apimachinery/pkg/api/equality/BUILD | 16 +- .../apimachinery/pkg/api/equality/semantic.go | 3 + .../k8s.io/apimachinery/pkg/api/errors/BUILD | 17 +- .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 2 +- .../apimachinery/pkg/api/errors/errors.go | 105 +- .../pkg/api/errors/errors_test.go | 34 +- vendor/k8s.io/apimachinery/pkg/api/meta/BUILD | 19 +- .../k8s.io/apimachinery/pkg/api/meta/OWNERS | 1 - .../apimachinery/pkg/api/meta/default.go | 51 - .../k8s.io/apimachinery/pkg/api/meta/help.go | 3 + .../apimachinery/pkg/api/meta/interfaces.go | 2 +- .../k8s.io/apimachinery/pkg/api/meta/meta.go | 50 +- .../apimachinery/pkg/api/meta/restmapper.go | 3 + .../apimachinery/pkg/api/resource/BUILD | 26 +- .../apimachinery/pkg/api/resource/OWNERS | 2 +- .../apimachinery/pkg/api/resource/quantity.go | 2 +- .../apimachinery/pkg/api/validation/BUILD | 20 +- .../pkg/api/validation/objectmeta.go | 9 +- .../pkg/api/validation/objectmeta_test.go | 36 +- .../apimachinery/pkg/apimachinery/BUILD | 21 +- .../pkg/apimachinery/announced/BUILD | 17 +- .../apimachinery/announced/announced_test.go | 2 - 
.../apimachinery/announced/group_factory.go | 27 +- .../pkg/apimachinery/registered/BUILD | 17 +- .../pkg/apimachinery/registered/registered.go | 48 +- .../registered/registered_test.go | 77 - .../pkg/apis/meta/internalversion}/BUILD | 48 +- .../apis/meta/internalversion/conversion.go | 77 + .../pkg/apis/meta/internalversion}/doc.go | 4 +- .../pkg/apis/meta/internalversion/register.go | 108 + .../meta/internalversion/register_test.go | 89 + .../meta/internalversion/roundtrip_test.go} | 12 +- .../pkg/apis/meta/internalversion/types.go | 70 + .../zz_generated.conversion.go | 113 + .../internalversion/zz_generated.deepcopy.go | 126 + .../apimachinery/pkg/apis/meta/v1/BUILD | 34 +- .../apimachinery/pkg/apis/meta/v1/OWNERS | 1 - .../pkg/apis/meta/v1/controller_ref.go | 54 + .../pkg/apis/meta/v1/controller_ref_test.go | 133 + .../pkg/apis/meta/v1/conversion.go | 18 + .../apimachinery/pkg/apis/meta/v1/doc.go | 2 +- .../pkg/apis/meta/v1/generated.pb.go | 707 +- .../pkg/apis/meta/v1/generated.proto | 70 +- .../pkg/apis/meta/v1/group_version_test.go | 10 +- .../apimachinery/pkg/apis/meta/v1/meta.go | 23 +- .../pkg/apis/meta/v1/micro_time.go | 14 +- .../apimachinery/pkg/apis/meta/v1/time.go | 12 +- .../apimachinery/pkg/apis/meta/v1/types.go | 98 +- .../meta/v1/types_swagger_doc_generated.go | 21 +- .../pkg/apis/meta/v1/types_test.go | 4 +- .../pkg/apis/meta/v1/unstructured/BUILD | 24 +- .../apis/meta/v1/unstructured/unstructured.go | 126 +- .../meta/v1/unstructured/unstructured_test.go | 26 +- .../v1/unstructured/zz_generated.deepcopy.go | 75 + .../pkg/apis/meta/v1/validation/BUILD | 17 +- .../pkg/apis/meta/v1/validation/validation.go | 2 + .../apimachinery/pkg/apis/meta/v1/watch.go | 9 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 1316 +- .../apimachinery/pkg/apis/meta/v1alpha1/BUILD | 24 +- .../pkg/apis/meta/v1alpha1/conversion.go} | 16 +- .../pkg/apis/meta/v1alpha1/deepcopy.go | 61 + .../pkg/apis/meta/v1alpha1/generated.proto | 3 + .../pkg/apis/meta/v1alpha1/register.go | 6 + .../pkg/apis/meta/v1alpha1/types.go | 4 + .../meta/v1alpha1/zz_generated.deepcopy.go | 268 +- .../k8s.io/apimachinery/pkg/conversion/BUILD | 21 +- .../pkg/conversion/queryparams/BUILD | 17 +- .../pkg/conversion/unstructured/BUILD | 33 +- .../pkg/conversion/unstructured/converter.go | 112 +- .../conversion/unstructured/converter_test.go | 462 - vendor/k8s.io/apimachinery/pkg/fields/BUILD | 17 +- .../apimachinery/pkg/fields/selector.go | 32 + .../apimachinery/pkg/fields/selector_test.go | 2 +- vendor/k8s.io/apimachinery/pkg/labels/BUILD | 19 +- .../apimachinery/pkg/labels/selector.go | 20 + .../pkg/labels/zz_generated.deepcopy.go | 59 + vendor/k8s.io/apimachinery/pkg/openapi/BUILD | 21 - vendor/k8s.io/apimachinery/pkg/openapi/doc.go | 18 - vendor/k8s.io/apimachinery/pkg/runtime/BUILD | 30 +- .../apimachinery/pkg/runtime/conversion.go | 15 + .../pkg/runtime/conversion_test.go | 46 +- .../apimachinery/pkg/runtime/embedded.go | 6 + .../apimachinery/pkg/runtime/embedded_test.go | 100 +- .../apimachinery/pkg/runtime/extension.go | 5 +- .../pkg/runtime/extension_test.go | 74 + .../apimachinery/pkg/runtime/generated.proto | 3 +- .../apimachinery/pkg/runtime/interfaces.go | 9 +- .../apimachinery/pkg/runtime/register.go | 2 +- .../apimachinery/pkg/runtime/schema/BUILD | 23 +- .../k8s.io/apimachinery/pkg/runtime/scheme.go | 16 +- .../apimachinery/pkg/runtime/scheme_test.go | 402 +- .../apimachinery/pkg/runtime/serializer/BUILD | 27 +- .../pkg/runtime/serializer/codec_test.go | 151 +- .../pkg/runtime/serializer/json/BUILD | 20 +- 
.../pkg/runtime/serializer/json/json.go | 42 +- .../pkg/runtime/serializer/json/json_test.go | 3 + .../pkg/runtime/serializer/protobuf/BUILD | 16 +- .../pkg/runtime/serializer/recognizer/BUILD | 19 +- .../pkg/runtime/serializer/streaming/BUILD | 17 +- .../pkg/runtime/serializer/versioning/BUILD | 17 +- .../serializer/versioning/versioning_test.go | 14 + .../pkg/runtime/swagger_doc_generator.go | 2 +- .../k8s.io/apimachinery/pkg/runtime/types.go | 6 +- .../pkg/runtime/zz_generated.deepcopy.go | 130 +- .../k8s.io/apimachinery/pkg/selection/BUILD | 14 +- vendor/k8s.io/apimachinery/pkg/types/BUILD | 14 +- .../k8s.io/apimachinery/pkg/util/cache/BUILD | 17 +- .../k8s.io/apimachinery/pkg/util/clock/BUILD | 15 +- .../k8s.io/apimachinery/pkg/util/diff/BUILD | 17 +- .../apimachinery/pkg/util/diff/diff_test.go | 8 + .../k8s.io/apimachinery/pkg/util/errors/BUILD | 15 +- .../apimachinery/pkg/util/errors/errors.go | 2 +- .../pkg/util/errors/errors_test.go | 11 +- .../k8s.io/apimachinery/pkg/util/framer/BUILD | 15 +- .../apimachinery/pkg/util/httpstream/BUILD | 18 +- .../pkg/util/httpstream/spdy/BUILD | 17 +- .../pkg/util/httpstream/spdy/roundtripper.go | 22 +- .../k8s.io/apimachinery/pkg/util/intstr/BUILD | 25 +- .../apimachinery/pkg/util/intstr/intstr.go | 2 +- .../k8s.io/apimachinery/pkg/util/json/BUILD | 19 +- .../k8s.io/apimachinery/pkg/util/json/json.go | 12 + .../apimachinery/pkg/util/mergepatch/BUILD | 17 +- vendor/k8s.io/apimachinery/pkg/util/net/BUILD | 17 +- .../k8s.io/apimachinery/pkg/util/net/http.go | 32 +- .../apimachinery/pkg/util/net/http_test.go | 38 + .../apimachinery/pkg/util/net/interface.go | 314 +- .../pkg/util/net/interface_test.go | 605 +- .../k8s.io/apimachinery/pkg/util/rand/BUILD | 15 +- .../k8s.io/apimachinery/pkg/util/rand/rand.go | 12 +- .../apimachinery/pkg/util/remotecommand/BUILD | 16 +- .../apimachinery/pkg/util/runtime/BUILD | 17 +- .../k8s.io/apimachinery/pkg/util/sets/BUILD | 24 +- .../pkg/util/strategicpatch/BUILD | 17 +- .../pkg/util/strategicpatch/patch.go | 2 +- .../pkg/util/strategicpatch/patch_test.go | 78 +- .../k8s.io/apimachinery/pkg/util/uuid/BUILD | 16 +- .../apimachinery/pkg/util/validation/BUILD | 20 +- .../pkg/util/validation/field/BUILD | 17 +- .../pkg/util/validation/validation.go | 64 +- .../pkg/util/validation/validation_test.go | 61 + .../k8s.io/apimachinery/pkg/util/wait/BUILD | 17 +- .../k8s.io/apimachinery/pkg/util/wait/wait.go | 36 + .../k8s.io/apimachinery/pkg/util/yaml/BUILD | 17 +- vendor/k8s.io/apimachinery/pkg/version/BUILD | 14 +- vendor/k8s.io/apimachinery/pkg/watch/BUILD | 30 +- vendor/k8s.io/apimachinery/pkg/watch/mux.go | 7 + .../k8s.io/apimachinery/pkg/watch/mux_test.go | 8 + .../apimachinery/pkg/watch/until_test.go | 172 + vendor/k8s.io/apimachinery/pkg/watch/watch.go | 5 +- .../apimachinery/pkg/watch/watch_test.go | 2 + .../pkg/watch/zz_generated.deepcopy.go | 59 + .../third_party/forked/golang/json/BUILD | 21 +- .../third_party/forked/golang/json/fields.go | 3 + .../forked/golang/json/fields_test.go | 30 + .../third_party/forked/golang/netutil/BUILD | 14 +- .../third_party/forked/golang/reflect/BUILD | 20 +- .../third_party/forked/golang/reflect/type.go | 91 - vendor/k8s.io/apiserver/OWNERS | 1 + vendor/k8s.io/apiserver/kubernetes-sha | 1 - .../pkg/authentication/authenticator/BUILD | 16 +- .../pkg/authentication/serviceaccount/BUILD | 17 +- .../pkg/authentication/serviceaccount/util.go | 4 +- .../apiserver/pkg/authentication/user/BUILD | 14 +- vendor/k8s.io/apiserver/pkg/features/BUILD | 16 +- 
.../apiserver/pkg/features/kube_features.go | 23 +- .../k8s.io/apiserver/pkg/util/feature/BUILD | 20 +- .../pkg/util/feature/feature_gate.go | 32 +- vendor/k8s.io/client-go/CHANGELOG.md | 100 +- vendor/k8s.io/client-go/OWNERS | 5 +- vendor/k8s.io/client-go/README.md | 32 +- vendor/k8s.io/client-go/discovery/BUILD | 28 +- .../client-go/discovery/discovery_client.go | 48 +- .../discovery/discovery_client_test.go | 63 +- vendor/k8s.io/client-go/discovery/fake/BUILD | 29 +- .../client-go/discovery/fake/discovery.go | 15 +- .../discovery/fake/discovery_test.go | 46 + .../k8s.io/client-go/discovery/restmapper.go | 31 +- .../client-go/discovery/restmapper_test.go | 42 +- vendor/k8s.io/client-go/kubernetes-sha | 1 - vendor/k8s.io/client-go/kubernetes/BUILD | 51 +- .../k8s.io/client-go/kubernetes/clientset.go | 416 +- vendor/k8s.io/client-go/kubernetes/fake/BUILD | 34 +- .../kubernetes/fake/clientset_generated.go | 71 +- .../client-go/kubernetes/fake/register.go | 12 +- vendor/k8s.io/client-go/kubernetes/import.go | 19 + .../k8s.io/client-go/kubernetes/scheme/BUILD | 22 +- .../client-go/kubernetes/scheme/register.go | 12 +- .../admissionregistration/v1alpha1/BUILD | 19 +- .../externaladmissionhookconfiguration.go | 64 +- .../admissionregistration/v1alpha1/fake/BUILD | 16 +- ...fake_externaladmissionhookconfiguration.go | 68 +- .../fake/fake_initializerconfiguration.go | 68 +- .../v1alpha1/initializerconfiguration.go | 64 +- .../kubernetes/typed/apps/v1beta1/BUILD | 19 +- .../typed/apps/v1beta1/controllerrevision.go | 70 +- .../typed/apps/v1beta1/deployment.go | 72 +- .../kubernetes/typed/apps/v1beta1/fake/BUILD | 16 +- .../v1beta1/fake/fake_controllerrevision.go | 74 +- .../apps/v1beta1/fake/fake_deployment.go | 84 +- .../apps/v1beta1/fake/fake_statefulset.go | 84 +- .../typed/apps/v1beta1/statefulset.go | 72 +- .../kubernetes/typed/apps/v1beta2}/BUILD | 11 +- .../typed/apps/v1beta2}/apps_client.go | 50 +- .../typed/apps/v1beta2}/controllerrevision.go | 100 +- .../typed/apps/v1beta2}/daemonset.go | 108 +- .../typed/apps/v1beta2}/deployment.go | 108 +- .../kubernetes/typed/apps/v1beta2}/doc.go | 2 +- .../kubernetes/typed/apps/v1beta2/fake/BUILD | 44 + .../v2alpha1 => apps/v1beta2}/fake/doc.go | 0 .../apps/v1beta2/fake/fake_apps_client.go | 58 + .../v1beta2/fake/fake_controllerrevision.go | 126 + .../typed/apps/v1beta2/fake/fake_daemonset.go | 138 + .../apps/v1beta2/fake/fake_deployment.go | 138 + .../apps/v1beta2/fake/fake_replicaset.go | 138 + .../typed/apps/v1beta2/fake/fake_scale.go} | 8 +- .../apps/v1beta2/fake/fake_statefulset.go | 160 + .../apps/v1beta2}/generated_expansion.go | 6 +- .../typed/apps/v1beta2}/replicaset.go | 108 +- .../kubernetes/typed/apps/v1beta2}/scale.go | 4 +- .../typed/apps/v1beta2}/statefulset.go | 117 +- .../kubernetes/typed/authentication/v1/BUILD | 19 +- .../typed/authentication/v1/fake/BUILD | 16 +- .../typed/authentication/v1beta1/BUILD | 19 +- .../typed/authentication/v1beta1/fake/BUILD | 16 +- .../kubernetes/typed/authorization/v1/BUILD | 21 +- .../authorization/v1/authorization_client.go | 5 + .../typed/authorization/v1/fake/BUILD | 18 +- .../v1/fake/fake_authorization_client.go | 4 + .../v1/fake/fake_selfsubjectrulesreview.go} | 11 +- .../fake_selfsubjectrulesreview_expansion.go} | 19 +- .../v1/selfsubjectrulesreview.go} | 22 +- .../v1/selfsubjectrulesreview_expansion.go} | 12 +- .../typed/authorization/v1beta1/BUILD | 21 +- .../v1beta1/authorization_client.go | 5 + .../typed/authorization/v1beta1/fake/BUILD | 18 +- .../v1beta1/fake/fake_authorization_client.go | 
4 + .../fake/fake_selfsubjectrulesreview.go} | 11 +- .../fake_selfsubjectrulesreview_expansion.go} | 17 +- .../v1beta1/selfsubjectrulesreview.go} | 22 +- .../selfsubjectrulesreview_expansion.go} | 14 +- .../kubernetes/typed/autoscaling/v1/BUILD | 19 +- .../typed/autoscaling/v1/fake/BUILD | 16 +- .../v1/fake/fake_horizontalpodautoscaler.go | 102 +- .../autoscaling/v1/horizontalpodautoscaler.go | 72 +- .../typed/autoscaling/v2alpha1/BUILD | 28 - .../typed/autoscaling/v2alpha1/doc.go | 20 - .../typed/autoscaling/v2beta1}/BUILD | 18 +- .../autoscaling_client.go | 32 +- .../typed/autoscaling/v2beta1}/doc.go | 2 +- .../{v2alpha1 => v2beta1}/fake/BUILD | 29 +- .../typed/autoscaling/v2beta1/fake}/doc.go | 4 +- .../fake/fake_autoscaling_client.go | 8 +- .../fake/fake_horizontalpodautoscaler.go | 106 +- .../v2beta1}/generated_expansion.go | 2 +- .../horizontalpodautoscaler.go | 106 +- .../client-go/kubernetes/typed/batch/v1/BUILD | 19 +- .../kubernetes/typed/batch/v1/fake/BUILD | 16 +- .../typed/batch/v1/fake/fake_job.go | 102 +- .../kubernetes/typed/batch/v1/job.go | 72 +- .../kubernetes/typed/batch/v1beta1}/BUILD | 6 +- .../typed/batch/v1beta1}/batch_client.go | 34 +- .../typed/batch/v1beta1}/cronjob.go | 108 +- .../kubernetes/typed/batch}/v1beta1/doc.go | 0 .../typed/batch/v1beta1/fake}/BUILD | 18 +- .../typed/batch/v1beta1/fake}/doc.go | 4 +- .../batch/v1beta1/fake/fake_batch_client.go} | 27 +- .../typed/batch/v1beta1/fake/fake_cronjob.go | 138 + .../batch/v1beta1}/generated_expansion.go | 2 +- .../kubernetes/typed/batch/v2alpha1/BUILD | 19 +- .../typed/batch/v2alpha1/cronjob.go | 72 +- .../typed/batch/v2alpha1/fake/BUILD | 16 +- .../typed/batch/v2alpha1/fake/fake_cronjob.go | 84 +- .../typed/certificates/v1beta1/BUILD | 19 +- .../v1beta1/certificatesigningrequest.go | 66 +- .../typed/certificates/v1beta1/fake/BUILD | 16 +- .../fake/fake_certificatesigningrequest.go | 78 +- .../client-go/kubernetes/typed/core/v1/BUILD | 22 +- .../typed/core/v1/componentstatus.go | 64 +- .../kubernetes/typed/core/v1/configmap.go | 70 +- .../kubernetes/typed/core/v1/endpoints.go | 70 +- .../kubernetes/typed/core/v1/event.go | 70 +- .../typed/core/v1/event_expansion.go | 2 +- .../kubernetes/typed/core/v1/fake/BUILD | 17 +- .../core/v1/fake/fake_componentstatus.go | 88 +- .../typed/core/v1/fake/fake_configmap.go | 88 +- .../typed/core/v1/fake/fake_endpoints.go | 88 +- .../typed/core/v1/fake/fake_event.go | 88 +- .../typed/core/v1/fake/fake_limitrange.go | 88 +- .../typed/core/v1/fake/fake_namespace.go | 102 +- .../typed/core/v1/fake/fake_node.go | 102 +- .../core/v1/fake/fake_persistentvolume.go | 102 +- .../v1/fake/fake_persistentvolumeclaim.go | 102 +- .../kubernetes/typed/core/v1/fake/fake_pod.go | 102 +- .../typed/core/v1/fake/fake_podtemplate.go | 88 +- .../v1/fake/fake_replicationcontroller.go | 117 +- .../typed/core/v1/fake/fake_resourcequota.go | 102 +- .../typed/core/v1/fake/fake_secret.go | 88 +- .../typed/core/v1/fake/fake_service.go | 102 +- .../typed/core/v1/fake/fake_serviceaccount.go | 88 +- .../kubernetes/typed/core/v1/limitrange.go | 70 +- .../kubernetes/typed/core/v1/namespace.go | 66 +- .../kubernetes/typed/core/v1/node.go | 66 +- .../typed/core/v1/persistentvolume.go | 66 +- .../typed/core/v1/persistentvolumeclaim.go | 72 +- .../client-go/kubernetes/typed/core/v1/pod.go | 72 +- .../kubernetes/typed/core/v1/podtemplate.go | 70 +- .../typed/core/v1/replicationcontroller.go | 84 +- .../kubernetes/typed/core/v1/resourcequota.go | 72 +- .../kubernetes/typed/core/v1/secret.go | 70 +- 
.../kubernetes/typed/core/v1/service.go | 72 +- .../typed/core/v1/serviceaccount.go | 70 +- .../kubernetes/typed/extensions/v1beta1/BUILD | 19 +- .../typed/extensions/v1beta1/daemonset.go | 72 +- .../typed/extensions/v1beta1/deployment.go | 83 +- .../typed/extensions/v1beta1/fake/BUILD | 16 +- .../extensions/v1beta1/fake/fake_daemonset.go | 84 +- .../v1beta1/fake/fake_deployment.go | 84 +- .../extensions/v1beta1/fake/fake_ingress.go | 84 +- .../v1beta1/fake/fake_podsecuritypolicy.go | 68 +- .../v1beta1/fake/fake_replicaset.go | 84 +- .../v1beta1/fake/fake_thirdpartyresource.go | 68 +- .../typed/extensions/v1beta1/ingress.go | 72 +- .../extensions/v1beta1/podsecuritypolicy.go | 64 +- .../typed/extensions/v1beta1/replicaset.go | 83 +- .../extensions/v1beta1/thirdpartyresource.go | 64 +- .../kubernetes/typed/networking/v1/BUILD | 19 +- .../kubernetes/typed/networking/v1/fake/BUILD | 16 +- .../networking/v1/fake/fake_networkpolicy.go | 88 +- .../typed/networking/v1/networkpolicy.go | 70 +- .../kubernetes/typed/policy/v1beta1/BUILD | 19 +- .../typed/policy/v1beta1/fake/BUILD | 16 +- .../v1beta1/fake/fake_poddisruptionbudget.go | 84 +- .../policy/v1beta1/poddisruptionbudget.go | 72 +- .../kubernetes/typed/rbac/v1}/BUILD | 9 +- .../kubernetes/typed/rbac/v1}/clusterrole.go | 106 +- .../typed/rbac/v1}/clusterrolebinding.go | 106 +- .../kubernetes/typed/rbac}/v1/doc.go | 0 .../kubernetes/typed/rbac/v1/fake}/BUILD | 24 +- .../kubernetes/typed/rbac/v1/fake}/doc.go | 4 +- .../typed/rbac/v1/fake/fake_clusterrole.go | 118 + .../rbac/v1/fake/fake_clusterrolebinding.go | 118 + .../typed/rbac/v1/fake/fake_rbac_client.go | 50 + .../typed/rbac/v1/fake/fake_role.go | 126 + .../typed/rbac/v1/fake/fake_rolebinding.go | 126 + .../typed/rbac/v1}/generated_expansion.go | 2 +- .../kubernetes/typed/rbac/v1}/rbac_client.go | 40 +- .../kubernetes/typed/rbac/v1}/role.go | 112 +- .../kubernetes/typed/rbac/v1}/rolebinding.go | 112 +- .../kubernetes/typed/rbac/v1alpha1/BUILD | 19 +- .../typed/rbac/v1alpha1/clusterrole.go | 64 +- .../typed/rbac/v1alpha1/clusterrolebinding.go | 64 +- .../kubernetes/typed/rbac/v1alpha1/fake/BUILD | 16 +- .../rbac/v1alpha1/fake/fake_clusterrole.go | 68 +- .../v1alpha1/fake/fake_clusterrolebinding.go | 68 +- .../typed/rbac/v1alpha1/fake/fake_role.go | 74 +- .../rbac/v1alpha1/fake/fake_rolebinding.go | 74 +- .../kubernetes/typed/rbac/v1alpha1/role.go | 70 +- .../typed/rbac/v1alpha1/rolebinding.go | 70 +- .../kubernetes/typed/rbac/v1beta1/BUILD | 19 +- .../typed/rbac/v1beta1/clusterrole.go | 64 +- .../typed/rbac/v1beta1/clusterrolebinding.go | 64 +- .../kubernetes/typed/rbac/v1beta1/fake/BUILD | 16 +- .../rbac/v1beta1/fake/fake_clusterrole.go | 68 +- .../v1beta1/fake/fake_clusterrolebinding.go | 68 +- .../typed/rbac/v1beta1/fake/fake_role.go | 74 +- .../rbac/v1beta1/fake/fake_rolebinding.go | 74 +- .../kubernetes/typed/rbac/v1beta1/role.go | 70 +- .../typed/rbac/v1beta1/rolebinding.go | 70 +- .../typed/scheduling/v1alpha1}/BUILD | 13 +- .../typed/scheduling}/v1alpha1/doc.go | 0 .../typed/scheduling/v1alpha1/fake/BUILD | 39 + .../typed/scheduling/v1alpha1/fake/doc.go | 20 + .../v1alpha1/fake/fake_priorityclass.go | 118 + .../v1alpha1/fake/fake_scheduling_client.go} | 28 +- .../v1alpha1/generated_expansion.go | 2 +- .../scheduling/v1alpha1/priorityclass.go | 145 + .../scheduling/v1alpha1/scheduling_client.go} | 34 +- .../kubernetes/typed/settings/v1alpha1/BUILD | 19 +- .../typed/settings/v1alpha1/fake/BUILD | 16 +- .../settings/v1alpha1/fake/fake_podpreset.go | 74 +- 
.../typed/settings/v1alpha1/podpreset.go | 70 +- .../kubernetes/typed/storage/v1/BUILD | 19 +- .../kubernetes/typed/storage/v1/fake/BUILD | 16 +- .../storage/v1/fake/fake_storageclass.go | 88 +- .../typed/storage/v1/storageclass.go | 64 +- .../kubernetes/typed/storage/v1beta1/BUILD | 19 +- .../typed/storage/v1beta1/fake/BUILD | 16 +- .../storage/v1beta1/fake/fake_storageclass.go | 68 +- .../typed/storage/v1beta1/storageclass.go | 64 +- vendor/k8s.io/client-go/pkg/api/v1/ref/BUILD | 19 - vendor/k8s.io/client-go/pkg/api/v1/ref/ref.go | 121 - vendor/k8s.io/client-go/pkg/version/BUILD | 16 +- vendor/k8s.io/client-go/pkg/version/base.go | 6 +- vendor/k8s.io/client-go/pkg/version/doc.go | 1 + .../plugin/pkg/client/auth/gcp/BUILD | 18 +- .../plugin/pkg/client/auth/gcp/gcp.go | 5 + vendor/k8s.io/client-go/rest/BUILD | 27 +- vendor/k8s.io/client-go/rest/config.go | 8 + vendor/k8s.io/client-go/rest/config_test.go | 4 + vendor/k8s.io/client-go/rest/request.go | 227 +- vendor/k8s.io/client-go/rest/request_test.go | 40 +- vendor/k8s.io/client-go/rest/transport.go | 2 + vendor/k8s.io/client-go/rest/watch/BUILD | 17 +- .../client-go/rest/zz_generated.deepcopy.go | 69 + vendor/k8s.io/client-go/testing/BUILD | 16 +- vendor/k8s.io/client-go/testing/actions.go | 50 + .../third_party/forked/golang/template/BUILD | 14 +- vendor/k8s.io/client-go/tools/auth/BUILD | 17 +- vendor/k8s.io/client-go/tools/cache/BUILD | 25 +- vendor/k8s.io/client-go/tools/cache/OWNERS | 11 +- .../client-go/tools/cache/controller.go | 5 +- vendor/k8s.io/client-go/tools/cache/heap.go | 323 + .../k8s.io/client-go/tools/cache/heap_test.go | 382 + .../k8s.io/client-go/tools/cache/listwatch.go | 15 +- .../client-go/tools/cache/mutation_cache.go | 2 +- .../tools/cache/mutation_detector.go | 18 +- .../tools/cache/processor_listener_test.go | 53 +- .../k8s.io/client-go/tools/cache/reflector.go | 59 +- .../tools/cache/reflector_metrics.go | 119 + .../client-go/tools/cache/reflector_test.go | 2 +- .../client-go/tools/cache/shared_informer.go | 170 +- .../client-go/tools/cache/testing/BUILD | 17 +- vendor/k8s.io/client-go/tools/clientcmd/BUILD | 23 +- .../client-go/tools/clientcmd/api/BUILD | 24 +- .../tools/clientcmd/api/doc.go} | 5 +- .../tools/clientcmd/api/latest/BUILD | 16 +- .../client-go/tools/clientcmd/api/types.go | 1 + .../client-go/tools/clientcmd/api/v1/BUILD | 18 +- .../tools/clientcmd/api/v1/doc.go} | 5 +- .../client-go/tools/clientcmd/api/v1/types.go | 1 + .../clientcmd/api/v1/zz_generated.deepcopy.go | 358 + .../clientcmd/api/zz_generated.deepcopy.go | 309 + .../k8s.io/client-go/tools/clientcmd/flag.go | 49 + .../client-go/tools/clientcmd/overrides.go | 25 +- .../tools/clientcmd/overrides_test.go | 50 + vendor/k8s.io/client-go/tools/metrics/BUILD | 14 +- vendor/k8s.io/client-go/tools/pager/BUILD | 51 + vendor/k8s.io/client-go/tools/pager/pager.go | 118 + .../client-go/tools/pager/pager_test.go | 206 + vendor/k8s.io/client-go/tools/record/BUILD | 22 +- vendor/k8s.io/client-go/tools/record/event.go | 2 +- .../client-go/tools/record/event_test.go | 23 +- .../client-go/tools/record/events_cache.go | 98 +- .../tools/record/events_cache_test.go | 28 +- .../ref => client-go/tools/reference}/BUILD | 4 +- .../ref => client-go/tools/reference}/ref.go | 7 +- .../client-go/tools/remotecommand/BUILD | 19 +- .../tools/remotecommand/remotecommand.go | 128 +- vendor/k8s.io/client-go/transport/BUILD | 23 +- vendor/k8s.io/client-go/transport/cache.go | 12 +- vendor/k8s.io/client-go/transport/config.go | 12 +- 
.../client-go/transport/round_trippers.go | 31 + .../transport/round_trippers_test.go | 61 + vendor/k8s.io/client-go/transport/spdy/BUILD | 29 + .../k8s.io/client-go/transport/spdy/spdy.go | 94 + vendor/k8s.io/client-go/util/cert/BUILD | 24 +- vendor/k8s.io/client-go/util/cert/io.go | 20 +- vendor/k8s.io/client-go/util/cert/pem.go | 143 +- vendor/k8s.io/client-go/util/cert/pem_test.go | 197 + vendor/k8s.io/client-go/util/exec/BUILD | 14 +- .../k8s.io/client-go/util/flowcontrol/BUILD | 17 +- vendor/k8s.io/client-go/util/homedir/BUILD | 14 +- vendor/k8s.io/client-go/util/integer/BUILD | 15 +- vendor/k8s.io/client-go/util/jsonpath/BUILD | 17 +- .../client-go/util/jsonpath/jsonpath.go | 42 +- .../client-go/util/jsonpath/jsonpath_test.go | 167 +- vendor/k8s.io/client-go/util/jsonpath/node.go | 4 +- .../k8s.io/client-go/util/jsonpath/parser.go | 92 +- .../client-go/util/jsonpath/parser_test.go | 16 + .../pkg/client => client-go/util}/retry/BUILD | 4 - .../client => client-go/util}/retry/OWNERS | 0 .../client => client-go/util}/retry/util.go | 0 .../util}/retry/util_test.go | 0 vendor/k8s.io/client-go/util/testing/BUILD | 15 +- vendor/k8s.io/kube-openapi/.gitignore | 20 + vendor/k8s.io/kube-openapi/.travis.yml | 4 + .../loads => k8s.io/kube-openapi}/LICENSE | 0 vendor/k8s.io/kube-openapi/OWNERS | 7 + vendor/k8s.io/kube-openapi/README.md | 14 + .../pkg/common}/common.go | 10 +- .../batch => kube-openapi/pkg/common}/doc.go | 6 +- vendor/k8s.io/kubernetes/.gazelcfg.json | 20 - vendor/k8s.io/kubernetes/.gitignore | 3 + vendor/k8s.io/kubernetes/.kazelcfg.json | 1 + vendor/k8s.io/kubernetes/CHANGELOG-1.8.md | 89 + vendor/k8s.io/kubernetes/CHANGELOG.md | 7772 -- vendor/k8s.io/kubernetes/OWNERS | 2 +- vendor/k8s.io/kubernetes/OWNERS_ALIASES | 122 +- vendor/k8s.io/kubernetes/README.md | 2 +- vendor/k8s.io/kubernetes/SUPPORT.md | 39 + vendor/k8s.io/kubernetes/labels.yaml | 40 + vendor/k8s.io/kubernetes/pkg/api/BUILD | 28 +- vendor/k8s.io/kubernetes/pkg/api/OWNERS | 6 +- .../pkg/api/annotation_key_constants.go | 18 +- .../kubernetes/pkg/api/conversion_test.go | 4 +- vendor/k8s.io/kubernetes/pkg/api/copy_test.go | 22 +- .../kubernetes/pkg/api/deep_copy_test.go | 18 +- .../kubernetes/pkg/api/defaulting_test.go | 56 +- vendor/k8s.io/kubernetes/pkg/api/helper/BUILD | 4 - .../kubernetes/pkg/api/helper/helpers.go | 67 +- .../kubernetes/pkg/api/helper/helpers_test.go | 137 +- .../k8s.io/kubernetes/pkg/api/install/BUILD | 4 - .../kubernetes/pkg/api/install/install.go | 1 - vendor/k8s.io/kubernetes/pkg/api/register.go | 1 + vendor/k8s.io/kubernetes/pkg/api/resource.go | 4 +- .../pkg/api/serialization_proto_test.go | 4 +- .../kubernetes/pkg/api/serialization_test.go | 25 +- .../k8s.io/kubernetes/pkg/api/service/BUILD | 7 - .../k8s.io/kubernetes/pkg/api/service/util.go | 64 +- .../kubernetes/pkg/api/service/util_test.go | 203 +- vendor/k8s.io/kubernetes/pkg/api/types.go | 404 +- .../kubernetes/pkg/api/unstructured_test.go | 22 +- vendor/k8s.io/kubernetes/pkg/api/util/BUILD | 4 - vendor/k8s.io/kubernetes/pkg/api/v1/BUILD | 12 +- vendor/k8s.io/kubernetes/pkg/api/v1/OWNERS | 4 +- .../kubernetes/pkg/api/v1/conversion.go | 290 +- .../k8s.io/kubernetes/pkg/api/v1/defaults.go | 29 +- .../kubernetes/pkg/api/v1/defaults_test.go | 275 +- .../k8s.io/kubernetes/pkg/api/v1/helper/BUILD | 7 +- .../kubernetes/pkg/api/v1/helper/helpers.go | 147 +- .../pkg/api/v1/helper/helpers_test.go | 59 + vendor/k8s.io/kubernetes/pkg/api/v1/pod/BUILD | 4 - .../k8s.io/kubernetes/pkg/api/v1/pod/util.go | 66 +- 
.../kubernetes/pkg/api/v1/pod/util_test.go | 50 - .../pkg/api/v1/{builder.go => register.go} | 0 .../kubernetes/pkg/api/v1/service/BUILD | 7 - .../kubernetes/pkg/api/v1/service/util.go | 68 +- .../pkg/api/v1/service/util_test.go | 203 +- .../pkg/api/v1/zz_generated.conversion.go | 447 +- .../pkg/api/v1/zz_generated.defaults.go | 12 + .../kubernetes/pkg/api/validation/BUILD | 33 +- .../kubernetes/pkg/api/validation/OWNERS | 5 +- .../kubernetes/pkg/api/validation/schema.go | 435 - .../pkg/api/validation/schema_test.go | 424 - .../pkg/api/validation/validation.go | 613 +- .../pkg/api/validation/validation_test.go | 1763 +- .../pkg/api/zz_generated.deepcopy.go | 7854 +- vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD | 45 - vendor/k8s.io/kubernetes/pkg/apis/apps/OWNERS | 21 - .../kubernetes/pkg/apis/apps/install/BUILD | 35 - .../pkg/apis/apps/install/install.go | 49 - .../k8s.io/kubernetes/pkg/apis/apps/types.go | 229 - .../kubernetes/pkg/apis/apps/v1beta1/BUILD | 65 - .../pkg/apis/apps/v1beta1/conversion.go | 345 - .../pkg/apis/apps/v1beta1/defaults.go | 118 - .../pkg/apis/apps/v1beta1/defaults_test.go | 222 - .../kubernetes/pkg/apis/apps/v1beta1/doc.go | 22 - .../pkg/apis/apps/v1beta1/register.go | 45 - .../apps/v1beta1/zz_generated.conversion.go | 324 - .../apps/v1beta1/zz_generated.defaults.go | 327 - .../pkg/apis/apps/zz_generated.deepcopy.go | 208 - .../kubernetes/pkg/apis/authentication/OWNERS | 9 - .../kubernetes/pkg/apis/authentication/doc.go | 19 - .../pkg/apis/authentication/install/BUILD | 37 - .../apis/authentication/install/install.go | 53 - .../pkg/apis/authentication/register.go | 50 - .../pkg/apis/authentication/types.go | 89 - .../pkg/apis/authentication/v1/BUILD | 41 - .../pkg/apis/authentication/v1/doc.go | 22 - .../pkg/apis/authentication/v1/register.go | 45 - .../v1/zz_generated.conversion.go | 154 - .../pkg/apis/authentication/v1beta1/BUILD | 41 - .../apis/authentication/v1beta1/conversion.go | 26 - .../apis/authentication/v1beta1/defaults.go | 25 - .../pkg/apis/authentication/v1beta1/doc.go | 22 - .../apis/authentication/v1beta1/register.go | 45 - .../v1beta1/zz_generated.conversion.go | 154 - .../v1beta1/zz_generated.defaults.go | 32 - .../authentication/zz_generated.deepcopy.go | 110 - .../kubernetes/pkg/apis/authorization/OWNERS | 17 - .../kubernetes/pkg/apis/authorization/doc.go | 19 - .../pkg/apis/authorization/install/BUILD | 37 - .../pkg/apis/authorization/install/install.go | 53 - .../pkg/apis/authorization/register.go | 52 - .../pkg/apis/authorization/types.go | 146 - .../pkg/apis/authorization/v1/BUILD | 41 - .../pkg/apis/authorization/v1/conversion.go | 26 - .../pkg/apis/authorization/v1/doc.go | 23 - .../pkg/apis/authorization/v1/register.go | 45 - .../v1/zz_generated.conversion.go | 280 - .../authorization/v1/zz_generated.defaults.go | 32 - .../pkg/apis/authorization/v1beta1/BUILD | 41 - .../apis/authorization/v1beta1/defaults.go | 25 - .../pkg/apis/authorization/v1beta1/doc.go | 23 - .../apis/authorization/v1beta1/register.go | 45 - .../v1beta1/zz_generated.conversion.go | 280 - .../authorization/zz_generated.deepcopy.go | 187 - .../kubernetes/pkg/apis/autoscaling/BUILD | 47 - .../kubernetes/pkg/apis/autoscaling/OWNERS | 20 - .../pkg/apis/autoscaling/annotations.go | 34 - .../pkg/apis/autoscaling/install/BUILD | 36 - .../pkg/apis/autoscaling/install/install.go | 51 - .../pkg/apis/autoscaling/register.go | 53 - .../kubernetes/pkg/apis/autoscaling/types.go | 358 - .../kubernetes/pkg/apis/autoscaling/v1/BUILD | 60 - .../pkg/apis/autoscaling/v1/conversion.go | 
277 - .../pkg/apis/autoscaling/v1/defaults.go | 36 - .../pkg/apis/autoscaling/v1/defaults_test.go | 93 - .../kubernetes/pkg/apis/autoscaling/v1/doc.go | 22 - .../pkg/apis/autoscaling/v1/register.go | 45 - .../autoscaling/v1/zz_generated.conversion.go | 507 - .../autoscaling/v1/zz_generated.defaults.go | 48 - .../pkg/apis/autoscaling/v2alpha1/defaults.go | 48 - .../pkg/apis/autoscaling/v2alpha1/doc.go | 22 - .../pkg/apis/autoscaling/v2alpha1/register.go | 45 - .../v2alpha1/zz_generated.conversion.go | 450 - .../v2alpha1/zz_generated.defaults.go | 50 - .../apis/autoscaling/zz_generated.deepcopy.go | 357 - vendor/k8s.io/kubernetes/pkg/apis/batch/BUILD | 45 - .../k8s.io/kubernetes/pkg/apis/batch/OWNERS | 19 - .../kubernetes/pkg/apis/batch/install/BUILD | 36 - .../pkg/apis/batch/install/install.go | 51 - .../k8s.io/kubernetes/pkg/apis/batch/types.go | 285 - .../pkg/apis/batch/v1/conversion.go | 86 - .../kubernetes/pkg/apis/batch/v1/defaults.go | 45 - .../pkg/apis/batch/v1/defaults_test.go | 225 - .../kubernetes/pkg/apis/batch/v1/doc.go | 22 - .../kubernetes/pkg/apis/batch/v1/register.go | 45 - .../apis/batch/v1/zz_generated.conversion.go | 212 - .../apis/batch/v1/zz_generated.defaults.go | 177 - .../kubernetes/pkg/apis/batch/v2alpha1/BUILD | 61 - .../pkg/apis/batch/v2alpha1/conversion.go | 44 - .../pkg/apis/batch/v2alpha1/defaults.go | 35 - .../pkg/apis/batch/v2alpha1/defaults_test.go | 104 - .../kubernetes/pkg/apis/batch/v2alpha1/doc.go | 22 - .../pkg/apis/batch/v2alpha1/register.go | 45 - .../batch/v2alpha1/zz_generated.conversion.go | 240 - .../batch/v2alpha1/zz_generated.defaults.go | 311 - .../pkg/apis/batch/zz_generated.deepcopy.go | 302 - .../kubernetes/pkg/apis/certificates/BUILD | 44 - .../kubernetes/pkg/apis/certificates/OWNERS | 14 - .../kubernetes/pkg/apis/certificates/doc.go | 19 - .../pkg/apis/certificates/helpers.go | 38 - .../pkg/apis/certificates/install/BUILD | 36 - .../pkg/apis/certificates/install/install.go | 51 - .../kubernetes/pkg/apis/certificates/types.go | 143 - .../pkg/apis/certificates/v1beta1/BUILD | 42 - .../apis/certificates/v1beta1/conversion.go | 38 - .../pkg/apis/certificates/v1beta1/defaults.go | 31 - .../pkg/apis/certificates/v1beta1/doc.go | 23 - .../pkg/apis/certificates/v1beta1/helpers.go | 40 - .../pkg/apis/certificates/v1beta1/register.go | 50 - .../v1beta1/zz_generated.conversion.go | 190 - .../v1beta1/zz_generated.defaults.go | 50 - .../certificates/zz_generated.deepcopy.go | 155 - .../kubernetes/pkg/apis/extensions/BUILD | 6 +- .../kubernetes/pkg/apis/extensions/OWNERS | 4 +- .../pkg/apis/extensions/install/BUILD | 36 - .../pkg/apis/extensions/install/install.go | 51 - .../pkg/apis/extensions/register.go | 5 +- .../kubernetes/pkg/apis/extensions/types.go | 191 +- .../pkg/apis/extensions/v1beta1/BUILD | 67 - .../pkg/apis/extensions/v1beta1/conversion.go | 431 - .../pkg/apis/extensions/v1beta1/defaults.go | 136 - .../apis/extensions/v1beta1/defaults_test.go | 536 - .../pkg/apis/extensions/v1beta1/doc.go | 25 - .../pkg/apis/extensions/v1beta1/register.go | 45 - .../v1beta1/zz_generated.conversion.go | 1716 - .../v1beta1/zz_generated.defaults.go | 476 - .../apis/extensions/zz_generated.deepcopy.go | 2141 +- .../kubernetes/pkg/apis/networking/BUILD | 4 +- .../kubernetes/pkg/apis/networking/types.go | 78 +- .../apis/networking/zz_generated.deepcopy.go | 353 +- .../k8s.io/kubernetes/pkg/apis/policy/OWNERS | 4 - .../k8s.io/kubernetes/pkg/apis/policy/doc.go | 19 - .../kubernetes/pkg/apis/policy/install/BUILD | 35 - .../pkg/apis/policy/install/install.go | 49 - 
.../kubernetes/pkg/apis/policy/register.go | 54 - .../kubernetes/pkg/apis/policy/types.go | 120 - .../kubernetes/pkg/apis/policy/v1beta1/BUILD | 41 - .../kubernetes/pkg/apis/policy/v1beta1/doc.go | 25 - .../pkg/apis/policy/v1beta1/register.go | 45 - .../policy/v1beta1/zz_generated.conversion.go | 186 - .../policy/v1beta1/zz_generated.defaults.go | 32 - .../pkg/apis/policy/zz_generated.deepcopy.go | 153 - vendor/k8s.io/kubernetes/pkg/apis/rbac/BUILD | 46 - vendor/k8s.io/kubernetes/pkg/apis/rbac/OWNERS | 17 - .../kubernetes/pkg/apis/rbac/helpers.go | 399 - .../kubernetes/pkg/apis/rbac/install/BUILD | 37 - .../pkg/apis/rbac/install/install.go | 53 - .../kubernetes/pkg/apis/rbac/v1alpha1/BUILD | 56 - .../pkg/apis/rbac/v1alpha1/conversion.go | 82 - .../pkg/apis/rbac/v1alpha1/conversion_test.go | 106 - .../pkg/apis/rbac/v1alpha1/defaults.go | 49 - .../kubernetes/pkg/apis/rbac/v1alpha1/doc.go | 23 - .../pkg/apis/rbac/v1alpha1/helpers.go | 150 - .../pkg/apis/rbac/v1alpha1/register.go | 44 - .../rbac/v1alpha1/zz_generated.conversion.go | 418 - .../rbac/v1alpha1/zz_generated.defaults.go | 69 - .../kubernetes/pkg/apis/rbac/v1beta1/BUILD | 42 - .../pkg/apis/rbac/v1beta1/defaults.go | 49 - .../kubernetes/pkg/apis/rbac/v1beta1/doc.go | 23 - .../pkg/apis/rbac/v1beta1/helpers.go | 150 - .../pkg/apis/rbac/v1beta1/register.go | 44 - .../rbac/v1beta1/zz_generated.conversion.go | 364 - .../rbac/v1beta1/zz_generated.defaults.go | 67 - .../pkg/apis/rbac/zz_generated.deepcopy.go | 269 - .../kubernetes/pkg/apis/settings/doc.go | 19 - .../pkg/apis/settings/install/BUILD | 35 - .../pkg/apis/settings/install/install.go | 49 - .../kubernetes/pkg/apis/settings/register.go | 52 - .../kubernetes/pkg/apis/settings/types.go | 63 - .../pkg/apis/settings/v1alpha1/BUILD | 42 - .../pkg/apis/settings/v1alpha1/doc.go | 23 - .../pkg/apis/settings/v1alpha1/register.go | 45 - .../v1alpha1/zz_generated.conversion.go | 166 - .../v1alpha1/zz_generated.defaults.go | 99 - .../apis/settings/zz_generated.deepcopy.go | 127 - .../k8s.io/kubernetes/pkg/apis/storage/BUILD | 45 - .../k8s.io/kubernetes/pkg/apis/storage/OWNERS | 3 - .../k8s.io/kubernetes/pkg/apis/storage/doc.go | 19 - .../kubernetes/pkg/apis/storage/install/BUILD | 37 - .../pkg/apis/storage/install/install.go | 53 - .../kubernetes/pkg/apis/storage/register.go | 51 - .../kubernetes/pkg/apis/storage/types.go | 60 - .../kubernetes/pkg/apis/storage/v1/BUILD | 42 - .../kubernetes/pkg/apis/storage/v1/doc.go | 22 - .../pkg/apis/storage/v1/register.go | 45 - .../storage/v1/zz_generated.conversion.go | 94 - .../apis/storage/v1/zz_generated.defaults.go | 32 - .../kubernetes/pkg/apis/storage/v1beta1/BUILD | 42 - .../pkg/apis/storage/v1beta1/doc.go | 22 - .../pkg/apis/storage/v1beta1/register.go | 45 - .../v1beta1/zz_generated.conversion.go | 94 - .../storage/v1beta1/zz_generated.defaults.go | 32 - .../pkg/apis/storage/zz_generated.deepcopy.go | 82 - .../k8s.io/kubernetes/pkg/capabilities/BUILD | 9 +- .../pkg/capabilities/capabilities.go | 27 +- .../pkg/capabilities/capabilities_test.go | 50 + .../clientset_generated/clientset/BUILD | 94 - .../clientset/clientset.go | 570 - .../clientset/import_known_versions.go | 42 - .../clientset/scheme/BUILD | 56 - .../clientset/scheme/doc.go | 20 - .../clientset/scheme/register.go | 91 - .../admissionregistration/v1alpha1/BUILD | 45 - .../v1alpha1/admissionregistration_client.go | 93 - .../externaladmissionhookconfiguration.go | 145 - .../v1alpha1/generated_expansion.go | 21 - .../v1alpha1/initializerconfiguration.go | 145 - 
.../clientset/typed/authentication/v1/BUILD | 42 - .../v1/authentication_client.go | 88 - .../authentication/v1/generated_expansion.go | 17 - .../typed/authentication/v1beta1/BUILD | 42 - .../v1beta1/authentication_client.go | 88 - .../v1beta1/tokenreview_expansion.go | 35 - .../clientset/typed/authorization/v1/BUILD | 46 - .../authorization/v1/authorization_client.go | 98 - .../v1/localsubjectaccessreview.go | 46 - .../v1/selfsubjectaccessreview.go | 44 - .../authorization/v1/subjectaccessreview.go | 44 - .../v1/subjectaccessreview_expansion.go | 36 - .../typed/authorization/v1beta1/BUILD | 46 - .../v1beta1/authorization_client.go | 98 - .../typed/authorization/v1beta1/doc.go | 20 - .../v1beta1/generated_expansion.go | 17 - .../v1beta1/localsubjectaccessreview.go | 46 - .../localsubjectaccessreview_expansion.go | 36 - .../v1beta1/selfsubjectaccessreview.go | 44 - .../v1beta1/subjectaccessreview.go | 44 - .../v1beta1/subjectaccessreview_expansion.go | 36 - .../autoscaling/v1/autoscaling_client.go | 88 - .../autoscaling/v1/horizontalpodautoscaler.go | 172 - .../v2alpha1/autoscaling_client.go | 88 - .../typed/autoscaling/v2alpha1/doc.go | 20 - .../v2alpha1/horizontalpodautoscaler.go | 172 - .../clientset/typed/batch/v1/batch_client.go | 88 - .../clientset/typed/batch/v1/job.go | 172 - .../clientset/typed/batch/v2alpha1/doc.go | 20 - .../typed/certificates/v1beta1/BUILD | 45 - .../v1beta1/certificates_client.go | 88 - .../v1beta1/certificatesigningrequest.go | 161 - .../certificatesigningrequest_expansion.go | 37 - .../typed/certificates/v1beta1/doc.go | 20 - .../v1beta1/generated_expansion.go | 17 - .../clientset/typed/core/v1/BUILD | 69 - .../typed/core/v1/componentstatus.go | 145 - .../clientset/typed/core/v1/configmap.go | 155 - .../clientset/typed/core/v1/core_client.go | 163 - .../clientset/typed/core/v1/endpoints.go | 155 - .../clientset/typed/core/v1/event.go | 155 - .../typed/core/v1/event_expansion.go | 164 - .../typed/core/v1/generated_expansion.go | 39 - .../clientset/typed/core/v1/limitrange.go | 155 - .../clientset/typed/core/v1/namespace.go | 161 - .../typed/core/v1/namespace_expansion.go | 31 - .../clientset/typed/core/v1/node.go | 161 - .../clientset/typed/core/v1/node_expansion.go | 43 - .../typed/core/v1/persistentvolume.go | 161 - .../typed/core/v1/persistentvolumeclaim.go | 172 - .../clientset/typed/core/v1/pod.go | 172 - .../clientset/typed/core/v1/pod_expansion.go | 45 - .../clientset/typed/core/v1/podtemplate.go | 155 - .../typed/core/v1/replicationcontroller.go | 172 - .../clientset/typed/core/v1/resourcequota.go | 172 - .../clientset/typed/core/v1/secret.go | 155 - .../clientset/typed/core/v1/service.go | 172 - .../typed/core/v1/service_expansion.go | 41 - .../clientset/typed/core/v1/serviceaccount.go | 155 - .../clientset/typed/extensions/v1beta1/BUILD | 54 - .../typed/extensions/v1beta1/deployment.go | 172 - .../v1beta1/deployment_expansion.go | 29 - .../clientset/typed/extensions/v1beta1/doc.go | 20 - .../extensions/v1beta1/extensions_client.go | 118 - .../extensions/v1beta1/generated_expansion.go | 27 - .../typed/extensions/v1beta1/ingress.go | 172 - .../extensions/v1beta1/podsecuritypolicy.go | 145 - .../typed/extensions/v1beta1/scale.go | 46 - .../extensions/v1beta1/scale_expansion.go | 65 - .../extensions/v1beta1/thirdpartyresource.go | 145 - .../clientset/typed/networking/v1/BUILD | 44 - .../clientset/typed/networking/v1/doc.go | 20 - .../networking/v1/generated_expansion.go | 19 - .../typed/networking/v1/networking_client.go | 88 - 
.../typed/networking/v1/networkpolicy.go | 155 - .../clientset/typed/policy/v1beta1/BUILD | 46 - .../clientset/typed/policy/v1beta1/doc.go | 20 - .../typed/policy/v1beta1/eviction.go | 46 - .../policy/v1beta1/eviction_expansion.go | 38 - .../policy/v1beta1/generated_expansion.go | 19 - .../policy/v1beta1/poddisruptionbudget.go | 172 - .../typed/policy/v1beta1/policy_client.go | 93 - .../clientset/typed/rbac/v1alpha1/BUILD | 47 - .../typed/rbac/v1alpha1/clusterrole.go | 145 - .../typed/rbac/v1alpha1/clusterrolebinding.go | 145 - .../clientset/typed/rbac/v1alpha1/doc.go | 20 - .../typed/rbac/v1alpha1/rbac_client.go | 103 - .../clientset/typed/rbac/v1alpha1/role.go | 155 - .../typed/rbac/v1alpha1/rolebinding.go | 155 - .../clientset/typed/rbac/v1beta1/doc.go | 20 - .../clientset/typed/settings/v1alpha1/BUILD | 44 - .../clientset/typed/settings/v1alpha1/doc.go | 20 - .../typed/settings/v1alpha1/podpreset.go | 155 - .../clientset/typed/storage/v1/doc.go | 20 - .../typed/storage/v1/generated_expansion.go | 19 - .../typed/storage/v1/storage_client.go | 88 - .../typed/storage/v1/storageclass.go | 145 - .../clientset/typed/storage/v1beta1/BUILD | 44 - .../clientset/typed/storage/v1beta1/doc.go | 20 - .../storage/v1beta1/generated_expansion.go | 19 - .../typed/storage/v1beta1/storage_client.go | 88 - .../typed/storage/v1beta1/storageclass.go | 145 - .../k8s.io/kubernetes/pkg/cloudprovider/BUILD | 3 - .../kubernetes/pkg/cloudprovider/OWNERS | 3 +- .../kubernetes/pkg/cloudprovider/cloud.go | 25 +- vendor/k8s.io/kubernetes/pkg/controller/BUILD | 82 +- .../k8s.io/kubernetes/pkg/controller/OWNERS | 1 - .../pkg/controller/client_builder.go | 6 +- .../pkg/controller/controller_ref_manager.go | 145 +- .../controller/controller_ref_manager_test.go | 33 +- .../pkg/controller/controller_utils.go | 159 +- .../pkg/controller/controller_utils_test.go | 373 +- .../kubernetes/pkg/credentialprovider/BUILD | 8 +- .../pkg/credentialprovider/aws/BUILD | 4 - .../pkg/credentialprovider/keyring.go | 2 +- .../pkg/credentialprovider/keyring_test.go | 2 +- .../pkg/credentialprovider/provider.go | 2 +- vendor/k8s.io/kubernetes/pkg/features/BUILD | 4 +- .../kubernetes/pkg/features/kube_features.go | 95 +- .../k8s.io/kubernetes/pkg/kubelet/apis/BUILD | 5 +- .../pkg/kubelet/apis/well_known_labels.go | 7 +- .../k8s.io/kubernetes/pkg/kubelet/types/BUILD | 53 + .../types/constants.go} | 17 +- .../types/doc.go} | 5 +- .../kubernetes/pkg/kubelet/types/labels.go | 40 + .../pkg/kubelet/types/labels_test.go | 119 + .../pkg/kubelet/types/pod_update.go | 159 + .../pkg/kubelet/types/pod_update_test.go | 178 + .../kubernetes/pkg/kubelet/types/types.go | 100 + .../pkg/kubelet/types/types_test.go | 138 + .../kubernetes/pkg/security/apparmor/BUILD | 8 +- .../pkg/security/apparmor/validate.go | 9 +- .../kubernetes/pkg/serviceaccount/BUILD | 11 +- .../kubernetes/pkg/serviceaccount/OWNERS | 1 + .../kubernetes/pkg/serviceaccount/jwt.go | 80 +- .../kubernetes/pkg/serviceaccount/jwt_test.go | 85 +- .../kubernetes/pkg/serviceaccount/util.go | 2 +- vendor/k8s.io/kubernetes/pkg/util/BUILD | 106 - vendor/k8s.io/kubernetes/pkg/util/doc.go | 20 - vendor/k8s.io/kubernetes/pkg/util/file/BUILD | 24 + .../k8s.io/kubernetes/pkg/util/file/file.go | 57 + .../kubernetes/pkg/util/goroutinemap/BUILD | 4 - .../goroutinemap/exponentialbackoff/BUILD | 3 - vendor/k8s.io/kubernetes/pkg/util/hash/BUILD | 4 - vendor/k8s.io/kubernetes/pkg/util/io/BUILD | 28 +- .../kubernetes/pkg/util/io/consistentread.go | 45 + vendor/k8s.io/kubernetes/pkg/util/io/io.go | 61 - 
.../k8s.io/kubernetes/pkg/util/io/io_test.go | 56 - .../k8s.io/kubernetes/pkg/util/io/writer.go | 30 +- vendor/k8s.io/kubernetes/pkg/util/mount/BUILD | 46 +- .../k8s.io/kubernetes/pkg/util/mount/exec.go | 50 + .../k8s.io/kubernetes/pkg/util/mount/fake.go | 12 + .../k8s.io/kubernetes/pkg/util/mount/mount.go | 130 +- .../kubernetes/pkg/util/mount/mount_linux.go | 334 +- .../pkg/util/mount/mount_linux_test.go | 203 +- .../pkg/util/mount/mount_unsupported.go | 27 +- .../pkg/util/mount/mount_windows.go | 206 + .../pkg/util/mount/mount_windows_test.go | 71 + .../pkg/util/mount/nsenter_mount.go | 109 +- .../pkg/util/mount/nsenter_mount_test.go | 67 + .../util/mount/nsenter_mount_unsupported.go | 12 + .../util/mount/safe_format_and_mount_test.go | 52 +- .../k8s.io/kubernetes/pkg/util/net/sets/BUILD | 4 - .../kubernetes/pkg/util/net/sets/ipnet.go | 10 +- .../k8s.io/kubernetes/pkg/util/parsers/BUILD | 4 - .../kubernetes/pkg/util/parsers/parsers.go | 6 +- .../pkg/util/parsers/parsers_test.go | 12 +- .../pkg/util/{exec => pointer}/BUILD | 20 +- .../kubernetes/pkg/util/pointer/pointer.go | 62 + .../{util_test.go => pointer/pointer_test.go} | 14 +- .../k8s.io/kubernetes/pkg/util/taints/BUILD | 44 + .../kubernetes/pkg/util/taints/taints.go | 331 + .../kubernetes/pkg/util/taints/taints_test.go | 113 + vendor/k8s.io/kubernetes/pkg/util/template.go | 48 - .../kubernetes/pkg/util/template_test.go | 61 - vendor/k8s.io/kubernetes/pkg/util/umask.go | 27 - .../kubernetes/pkg/util/umask_windows.go | 27 - vendor/k8s.io/kubernetes/pkg/util/util.go | 131 - .../k8s.io/kubernetes/pkg/util/version/BUILD | 4 - .../kubernetes/pkg/util/version/version.go | 28 + .../pkg/util/version/version_test.go | 73 + vendor/k8s.io/kubernetes/pkg/volume/BUILD | 33 +- vendor/k8s.io/kubernetes/pkg/volume/OWNERS | 2 - .../kubernetes/pkg/volume/metrics_du_test.go | 6 +- .../k8s.io/kubernetes/pkg/volume/plugins.go | 174 +- .../kubernetes/pkg/volume/plugins_test.go | 14 +- vendor/k8s.io/kubernetes/pkg/volume/util.go | 87 +- .../k8s.io/kubernetes/pkg/volume/util/BUILD | 55 +- .../pkg/volume/util/atomic_writer.go | 10 +- .../pkg/volume/util/device_util_linux_test.go | 6 + .../k8s.io/kubernetes/pkg/volume/util/fs.go | 7 +- .../kubernetes/pkg/volume/util/metrics.go | 63 + .../k8s.io/kubernetes/pkg/volume/util/util.go | 82 +- .../kubernetes/pkg/volume/util/util_test.go | 123 + .../k8s.io/kubernetes/pkg/volume/util_test.go | 405 +- vendor/k8s.io/kubernetes/pkg/volume/volume.go | 2 +- .../kubernetes/pkg/volume/volume_linux.go | 14 + .../pkg/volume/volume_unsupported.go | 4 + vendor/k8s.io/utils/.travis.yml | 10 + vendor/k8s.io/utils/LICENSE | 202 + vendor/k8s.io/utils/README.md | 0 vendor/k8s.io/utils/code-of-conduct.md | 5 + .../pkg/util => utils}/exec/doc.go | 4 +- .../pkg/util => utils}/exec/exec.go | 12 +- .../pkg/util => utils}/exec/exec_test.go | 2 +- .../exec => utils/exec/testing}/fake_exec.go | 24 +- 1965 files changed, 454288 insertions(+), 314004 deletions(-) delete mode 100644 vendor/github.com/Sirupsen/logrus/CHANGELOG.md delete mode 100644 vendor/github.com/Sirupsen/logrus/json_formatter_test.go delete mode 100644 vendor/github.com/Sirupsen/logrus/terminal_openbsd.go delete mode 100644 vendor/github.com/Sirupsen/logrus/writer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/Gopkg.lock create mode 100644 vendor/github.com/aws/aws-sdk-go/Gopkg.toml create mode 100644 vendor/github.com/aws/aws-sdk-go/buildspec.yml create mode 100644 vendor/github.com/docker/distribution/BUILDING.md create mode 100644 
vendor/github.com/docker/distribution/CHANGELOG.md create mode 100644 vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md delete mode 100644 vendor/github.com/docker/distribution/digest/digester_resumable_test.go delete mode 100644 vendor/github.com/docker/distribution/digest/verifiers_test.go rename vendor/github.com/docker/distribution/{digest => digestset}/set.go (90%) rename vendor/github.com/docker/distribution/{digest => digestset}/set_test.go (93%) create mode 100644 vendor/github.com/docker/distribution/reference/helpers.go create mode 100644 vendor/github.com/docker/distribution/reference/normalize.go create mode 100644 vendor/github.com/docker/distribution/reference/normalize_test.go create mode 100644 vendor/github.com/docker/distribution/vendor.conf rename vendor/github.com/docker/{engine-api => docker/api}/types/auth.go (100%) rename vendor/github.com/docker/{engine-api => docker/api}/types/blkiodev/blkio.go (100%) rename vendor/github.com/docker/{engine-api => docker/api}/types/client.go (52%) rename vendor/github.com/docker/{engine-api => docker/api}/types/configs.go (73%) create mode 100644 vendor/github.com/docker/docker/api/types/container/config.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_create.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_update.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_wait.go rename vendor/github.com/docker/{engine-api => docker/api}/types/container/host_config.go (88%) rename vendor/github.com/docker/{engine-api => docker/api}/types/container/hostconfig_unix.go (96%) rename vendor/github.com/docker/{engine-api => docker/api}/types/container/hostconfig_windows.go (100%) create mode 100644 vendor/github.com/docker/docker/api/types/error_response.go rename vendor/github.com/docker/{engine-api => docker/api}/types/filters/parse.go (92%) rename vendor/github.com/docker/{engine-api => docker/api}/types/filters/parse_test.go (93%) create mode 100644 vendor/github.com/docker/docker/api/types/id_response.go create mode 100644 vendor/github.com/docker/docker/api/types/image_summary.go create mode 100644 vendor/github.com/docker/docker/api/types/mount/mount.go rename vendor/github.com/docker/{engine-api => docker/api}/types/network/network.go (86%) create mode 100644 vendor/github.com/docker/docker/api/types/plugin.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_device.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_env.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_interface_type.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_mount.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_responses.go create mode 100644 vendor/github.com/docker/docker/api/types/port.go create mode 100644 vendor/github.com/docker/docker/api/types/registry/authenticate.go rename vendor/github.com/docker/{engine-api => docker/api}/types/registry/registry.go (96%) rename vendor/github.com/docker/{engine-api => docker/api}/types/seccomp.go (58%) create mode 100644 vendor/github.com/docker/docker/api/types/service_update_response.go create mode 100644 vendor/github.com/docker/docker/api/types/stats.go rename vendor/github.com/docker/{engine-api => docker/api}/types/strslice/strslice.go (100%) rename vendor/github.com/docker/{engine-api => docker/api}/types/strslice/strslice_test.go (100%) create mode 100644 
vendor/github.com/docker/docker/api/types/swarm/common.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/container.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/network.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/node.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/secret.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/service.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/swarm.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/task.go create mode 100644 vendor/github.com/docker/docker/api/types/types.go rename vendor/github.com/docker/{engine-api => docker/api}/types/versions/README.md (88%) rename vendor/github.com/docker/{engine-api => docker/api}/types/versions/compare.go (100%) rename vendor/github.com/docker/{engine-api => docker/api}/types/versions/compare_test.go (100%) create mode 100644 vendor/github.com/docker/docker/api/types/volume.go delete mode 100644 vendor/github.com/docker/engine-api/.travis.yml delete mode 100644 vendor/github.com/docker/engine-api/CHANGELOG.md delete mode 100644 vendor/github.com/docker/engine-api/MAINTAINERS delete mode 100644 vendor/github.com/docker/engine-api/Makefile delete mode 100644 vendor/github.com/docker/engine-api/README.md delete mode 100644 vendor/github.com/docker/engine-api/appveyor.yml delete mode 100644 vendor/github.com/docker/engine-api/client/client.go delete mode 100644 vendor/github.com/docker/engine-api/client/client_darwin.go delete mode 100644 vendor/github.com/docker/engine-api/client/client_mock_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/client_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/client_unix.go delete mode 100644 vendor/github.com/docker/engine-api/client/client_windows.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_attach.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_commit.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_commit_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_copy.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_copy_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_create.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_create_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_diff.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_diff_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_exec.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_exec_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_export.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_export_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_inspect.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_inspect_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_kill.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_kill_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_list.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_list_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_logs.go delete 
mode 100644 vendor/github.com/docker/engine-api/client/container_logs_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_pause.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_pause_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_remove.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_remove_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_rename.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_rename_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_resize.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_resize_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_restart.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_restart_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_start.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_start_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_stats.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_stats_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_stop.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_stop_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_top.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_top_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_unpause.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_unpause_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_update.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_update_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_wait.go delete mode 100644 vendor/github.com/docker/engine-api/client/container_wait_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/errors.go delete mode 100644 vendor/github.com/docker/engine-api/client/events.go delete mode 100644 vendor/github.com/docker/engine-api/client/hijack.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_build.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_build_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_create.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_create_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_history.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_history_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_import.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_import_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_inspect.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_inspect_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_list.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_list_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_load.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_load_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_pull.go delete mode 100644 
vendor/github.com/docker/engine-api/client/image_pull_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_push.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_remove.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_remove_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_save.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_save_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_search.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_tag.go delete mode 100644 vendor/github.com/docker/engine-api/client/image_tag_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/info.go delete mode 100644 vendor/github.com/docker/engine-api/client/info_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/interface.go delete mode 100644 vendor/github.com/docker/engine-api/client/login.go delete mode 100644 vendor/github.com/docker/engine-api/client/network_connect.go delete mode 100644 vendor/github.com/docker/engine-api/client/network_connect_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/network_create.go delete mode 100644 vendor/github.com/docker/engine-api/client/network_create_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/network_disconnect.go delete mode 100644 vendor/github.com/docker/engine-api/client/network_disconnect_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/network_inspect.go delete mode 100644 vendor/github.com/docker/engine-api/client/network_inspect_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/network_list.go delete mode 100644 vendor/github.com/docker/engine-api/client/network_list_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/network_remove.go delete mode 100644 vendor/github.com/docker/engine-api/client/network_remove_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/request.go delete mode 100644 vendor/github.com/docker/engine-api/client/request_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/transport/cancellable/canceler.go delete mode 100644 vendor/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go delete mode 100644 vendor/github.com/docker/engine-api/client/transport/cancellable/cancellable.go delete mode 100644 vendor/github.com/docker/engine-api/client/transport/client.go delete mode 100644 vendor/github.com/docker/engine-api/client/transport/transport.go delete mode 100644 vendor/github.com/docker/engine-api/client/version.go delete mode 100644 vendor/github.com/docker/engine-api/client/volume_create.go delete mode 100644 vendor/github.com/docker/engine-api/client/volume_create_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/volume_inspect.go delete mode 100644 vendor/github.com/docker/engine-api/client/volume_inspect_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/volume_list.go delete mode 100644 vendor/github.com/docker/engine-api/client/volume_list_test.go delete mode 100644 vendor/github.com/docker/engine-api/client/volume_remove.go delete mode 100644 vendor/github.com/docker/engine-api/client/volume_remove_test.go delete mode 100644 vendor/github.com/docker/engine-api/doc.go delete mode 100644 vendor/github.com/docker/engine-api/types/container/config.go delete mode 100644 vendor/github.com/docker/engine-api/types/reference/image_reference.go delete 
mode 100644 vendor/github.com/docker/engine-api/types/reference/image_reference_test.go delete mode 100644 vendor/github.com/docker/engine-api/types/stats.go delete mode 100644 vendor/github.com/docker/engine-api/types/time/timestamp.go delete mode 100644 vendor/github.com/docker/engine-api/types/time/timestamp_test.go delete mode 100644 vendor/github.com/docker/engine-api/types/types.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go delete mode 100644 vendor/github.com/exponent-io/jsonpath/.gitignore delete mode 100644 vendor/github.com/exponent-io/jsonpath/.travis.yml delete mode 100644 vendor/github.com/exponent-io/jsonpath/README.md delete mode 100644 vendor/github.com/exponent-io/jsonpath/decoder.go delete mode 100644 vendor/github.com/exponent-io/jsonpath/decoder_test.go delete mode 100644 vendor/github.com/exponent-io/jsonpath/example_test.go delete mode 100644 vendor/github.com/exponent-io/jsonpath/path.go delete mode 100644 vendor/github.com/exponent-io/jsonpath/path_test.go delete mode 100644 vendor/github.com/exponent-io/jsonpath/pathaction.go delete mode 100644 vendor/github.com/exponent-io/jsonpath/pathaction_test.go create mode 100644 vendor/github.com/go-ini/ini/.travis.yml create mode 100644 vendor/github.com/go-ini/ini/Makefile create mode 100644 vendor/github.com/go-ini/ini/error.go create mode 100644 vendor/github.com/go-ini/ini/key.go create mode 100644 vendor/github.com/go-ini/ini/key_test.go create mode 100644 vendor/github.com/go-ini/ini/parser.go create mode 100644 vendor/github.com/go-ini/ini/parser_test.go create mode 100644 vendor/github.com/go-ini/ini/section.go create mode 100644 vendor/github.com/go-ini/ini/section_test.go delete mode 100644 vendor/github.com/go-openapi/analysis/.drone.sec delete mode 100644 vendor/github.com/go-openapi/analysis/.drone.yml delete mode 100644 vendor/github.com/go-openapi/analysis/.gitignore delete mode 100644 vendor/github.com/go-openapi/analysis/.pullapprove.yml delete mode 100644 vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/go-openapi/analysis/README.md delete mode 100644 vendor/github.com/go-openapi/analysis/analyzer.go delete mode 100644 vendor/github.com/go-openapi/analysis/analyzer_test.go delete mode 100644 vendor/github.com/go-openapi/loads/.drone.sec delete mode 100644 vendor/github.com/go-openapi/loads/.drone.yml delete mode 100644 vendor/github.com/go-openapi/loads/.gitignore delete mode 100644 vendor/github.com/go-openapi/loads/.pullapprove.yml delete mode 100644 vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/go-openapi/loads/README.md delete mode 100644 vendor/github.com/go-openapi/loads/json_test.go delete mode 100644 vendor/github.com/go-openapi/loads/spec.go delete mode 100644 vendor/github.com/go-openapi/loads/spec_test.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/any_test.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go create mode 100644 
vendor/github.com/golang/protobuf/ptypes/duration/duration.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration_test.go create mode 100755 vendor/github.com/golang/protobuf/ptypes/regen.sh create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp_test.go create mode 100644 vendor/github.com/google/btree/.travis.yml rename vendor/github.com/{go-openapi/analysis => google/btree}/LICENSE (100%) create mode 100644 vendor/github.com/google/btree/README.md create mode 100644 vendor/github.com/google/btree/btree.go create mode 100644 vendor/github.com/google/btree/btree_mem.go create mode 100644 vendor/github.com/google/btree/btree_test.go create mode 100644 vendor/github.com/googleapis/gnostic/.gitignore create mode 100755 vendor/github.com/googleapis/gnostic/.travis-install.sh create mode 100644 vendor/github.com/googleapis/gnostic/.travis.yml create mode 100755 vendor/github.com/googleapis/gnostic/COMPILE-PROTOS.sh create mode 100644 vendor/github.com/googleapis/gnostic/CONTRIBUTING.md create mode 100644 vendor/github.com/googleapis/gnostic/LICENSE create mode 100644 vendor/github.com/googleapis/gnostic/Makefile create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json create mode 100644 vendor/github.com/googleapis/gnostic/README.md create mode 100644 vendor/github.com/googleapis/gnostic/compiler/README.md create mode 100644 vendor/github.com/googleapis/gnostic/compiler/context.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/error.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/extension-handler.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/helpers.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/main.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/reader.go create mode 100755 vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh create mode 100644 vendor/github.com/googleapis/gnostic/extensions/README.md create mode 100644 vendor/github.com/googleapis/gnostic/extensions/extension.pb.go create mode 100644 vendor/github.com/googleapis/gnostic/extensions/extension.proto create mode 100644 vendor/github.com/googleapis/gnostic/extensions/extensions.go create mode 100644 vendor/github.com/googleapis/gnostic/gnostic.go create mode 100644 vendor/github.com/googleapis/gnostic/gnostic_test.go create mode 100644 vendor/github.com/gregjones/httpcache/.travis.yml create mode 100644 vendor/github.com/gregjones/httpcache/LICENSE.txt create mode 100644 vendor/github.com/gregjones/httpcache/README.md create mode 100644 vendor/github.com/gregjones/httpcache/diskcache/diskcache.go create mode 100644 vendor/github.com/gregjones/httpcache/diskcache/diskcache_test.go create mode 100644 vendor/github.com/gregjones/httpcache/httpcache.go create mode 100644 vendor/github.com/gregjones/httpcache/httpcache_test.go create mode 100644 vendor/github.com/json-iterator/go/.codecov.yml create mode 
100644 vendor/github.com/json-iterator/go/.gitignore create mode 100644 vendor/github.com/json-iterator/go/.travis.yml rename vendor/github.com/{exponent-io/jsonpath => json-iterator/go}/LICENSE (94%) create mode 100644 vendor/github.com/json-iterator/go/README.md create mode 100644 vendor/github.com/json-iterator/go/example_test.go create mode 100644 vendor/github.com/json-iterator/go/feature_adapter.go create mode 100644 vendor/github.com/json-iterator/go/feature_any.go create mode 100644 vendor/github.com/json-iterator/go/feature_any_array.go create mode 100644 vendor/github.com/json-iterator/go/feature_any_bool.go create mode 100644 vendor/github.com/json-iterator/go/feature_any_float.go create mode 100644 vendor/github.com/json-iterator/go/feature_any_int32.go create mode 100644 vendor/github.com/json-iterator/go/feature_any_int64.go create mode 100644 vendor/github.com/json-iterator/go/feature_any_invalid.go create mode 100644 vendor/github.com/json-iterator/go/feature_any_nil.go create mode 100644 vendor/github.com/json-iterator/go/feature_any_number.go create mode 100644 vendor/github.com/json-iterator/go/feature_any_object.go create mode 100644 vendor/github.com/json-iterator/go/feature_any_string.go create mode 100644 vendor/github.com/json-iterator/go/feature_any_uint32.go create mode 100644 vendor/github.com/json-iterator/go/feature_any_uint64.go create mode 100644 vendor/github.com/json-iterator/go/feature_config.go create mode 100644 vendor/github.com/json-iterator/go/feature_iter.go create mode 100644 vendor/github.com/json-iterator/go/feature_iter_array.go create mode 100644 vendor/github.com/json-iterator/go/feature_iter_float.go create mode 100644 vendor/github.com/json-iterator/go/feature_iter_int.go create mode 100644 vendor/github.com/json-iterator/go/feature_iter_object.go create mode 100644 vendor/github.com/json-iterator/go/feature_iter_skip.go create mode 100644 vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go create mode 100644 vendor/github.com/json-iterator/go/feature_iter_skip_strict.go create mode 100644 vendor/github.com/json-iterator/go/feature_iter_string.go create mode 100644 vendor/github.com/json-iterator/go/feature_json_number.go create mode 100644 vendor/github.com/json-iterator/go/feature_pool.go create mode 100644 vendor/github.com/json-iterator/go/feature_reflect.go create mode 100644 vendor/github.com/json-iterator/go/feature_reflect_array.go create mode 100644 vendor/github.com/json-iterator/go/feature_reflect_extension.go create mode 100644 vendor/github.com/json-iterator/go/feature_reflect_map.go create mode 100644 vendor/github.com/json-iterator/go/feature_reflect_native.go create mode 100644 vendor/github.com/json-iterator/go/feature_reflect_object.go create mode 100644 vendor/github.com/json-iterator/go/feature_reflect_slice.go create mode 100644 vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go create mode 100644 vendor/github.com/json-iterator/go/feature_stream.go create mode 100644 vendor/github.com/json-iterator/go/feature_stream_float.go create mode 100644 vendor/github.com/json-iterator/go/feature_stream_int.go create mode 100644 vendor/github.com/json-iterator/go/feature_stream_string.go create mode 100644 vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md create mode 100644 vendor/github.com/json-iterator/go/jsoniter.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_1dot8_only_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_adapter_test.go create mode 
100644 vendor/github.com/json-iterator/go/jsoniter_alias_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_any_array_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_any_bool_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_any_float_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_any_int_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_any_map_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_any_null_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_any_object_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_any_string_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_array_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_bool_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_customize_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_demo_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_encode_interface_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_fixed_array_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_float_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_int_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_interface_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_invalid_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_io_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_iterator_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_large_file_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_map_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_must_be_valid_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_nested_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_null_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_object_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_optional_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_raw_message_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_reader_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_reflect_native_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_skip_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_sloppy_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_stream_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_string_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_struct_decoder_test.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter_wrap_test.go create mode 100755 vendor/github.com/json-iterator/go/test.sh create mode 100644 vendor/github.com/json-iterator/go/unmarshal_input_test.go create mode 100644 vendor/github.com/opencontainers/go-digest/.pullapprove.yml create mode 100644 vendor/github.com/opencontainers/go-digest/.travis.yml rename vendor/github.com/{docker/engine-api => opencontainers/go-digest}/CONTRIBUTING.md (67%) rename vendor/github.com/{docker/engine-api/LICENSE => opencontainers/go-digest/LICENSE.code} (99%) create mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE.docs create mode 100644 
vendor/github.com/opencontainers/go-digest/MAINTAINERS create mode 100644 vendor/github.com/opencontainers/go-digest/README.md rename vendor/github.com/{docker/distribution/digest/digester.go => opencontainers/go-digest/algorithm.go} (79%) create mode 100644 vendor/github.com/opencontainers/go-digest/algorithm_test.go rename vendor/github.com/{docker/distribution/digest => opencontainers/go-digest}/digest.go (75%) rename vendor/github.com/{docker/distribution/digest => opencontainers/go-digest}/digest_test.go (86%) create mode 100644 vendor/github.com/opencontainers/go-digest/digester.go rename vendor/github.com/{docker/distribution/digest => opencontainers/go-digest}/doc.go (100%) rename vendor/github.com/{docker/distribution/digest => opencontainers/go-digest}/verifiers.go (71%) create mode 100644 vendor/github.com/opencontainers/go-digest/verifiers_test.go rename vendor/github.com/{ugorji/go => peterbourgon/diskv}/LICENSE (84%) create mode 100644 vendor/github.com/peterbourgon/diskv/README.md create mode 100644 vendor/github.com/peterbourgon/diskv/basic_test.go create mode 100644 vendor/github.com/peterbourgon/diskv/compression.go create mode 100644 vendor/github.com/peterbourgon/diskv/compression_test.go create mode 100644 vendor/github.com/peterbourgon/diskv/diskv.go create mode 100644 vendor/github.com/peterbourgon/diskv/import_test.go create mode 100644 vendor/github.com/peterbourgon/diskv/index.go create mode 100644 vendor/github.com/peterbourgon/diskv/index_test.go create mode 100644 vendor/github.com/peterbourgon/diskv/issues_test.go create mode 100644 vendor/github.com/peterbourgon/diskv/keys_test.go create mode 100644 vendor/github.com/peterbourgon/diskv/speed_test.go create mode 100644 vendor/github.com/peterbourgon/diskv/stream_test.go delete mode 100644 vendor/github.com/prometheus/client_golang/AUTHORS.md create mode 100644 vendor/github.com/prometheus/client_golang/MAINTAINERS.md delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/example_memstats_test.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/example_selfcollector_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/example_timer_complex_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/example_timer_gauge_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/example_timer_test.go rename vendor/github.com/prometheus/client_golang/prometheus/{expvar.go => expvar_collector.go} (81%) rename vendor/github.com/prometheus/client_golang/prometheus/{expvar_test.go => expvar_collector_test.go} (98%) create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/fnv.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/observer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_7.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8_test.go create mode 100644 
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/push.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer_test.go delete mode 100644 vendor/github.com/ugorji/go/README.md delete mode 100644 vendor/github.com/ugorji/go/codec/0doc.go delete mode 100644 vendor/github.com/ugorji/go/codec/README.md delete mode 100644 vendor/github.com/ugorji/go/codec/binc.go delete mode 100644 vendor/github.com/ugorji/go/codec/cbor.go delete mode 100644 vendor/github.com/ugorji/go/codec/cbor_test.go delete mode 100644 vendor/github.com/ugorji/go/codec/codec_test.go delete mode 100644 vendor/github.com/ugorji/go/codec/codecgen_test.go delete mode 100644 vendor/github.com/ugorji/go/codec/decode.go delete mode 100644 vendor/github.com/ugorji/go/codec/decode_go.go delete mode 100644 vendor/github.com/ugorji/go/codec/decode_go14.go delete mode 100644 vendor/github.com/ugorji/go/codec/encode.go delete mode 100644 vendor/github.com/ugorji/go/codec/fast-path.generated.go delete mode 100644 vendor/github.com/ugorji/go/codec/fast-path.go.tmpl delete mode 100644 vendor/github.com/ugorji/go/codec/fast-path.not.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl delete mode 100644 vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl delete mode 100644 vendor/github.com/ugorji/go/codec/gen-helper.generated.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl delete mode 100644 vendor/github.com/ugorji/go/codec/gen.generated.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen_15.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen_16.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen_17.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper_internal.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper_not_unsafe.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper_test.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper_unsafe.go delete mode 100644 vendor/github.com/ugorji/go/codec/json.go delete mode 100644 vendor/github.com/ugorji/go/codec/msgpack.go delete mode 100644 vendor/github.com/ugorji/go/codec/noop.go delete mode 100644 vendor/github.com/ugorji/go/codec/prebuild.go delete mode 100755 vendor/github.com/ugorji/go/codec/prebuild.sh delete mode 100644 vendor/github.com/ugorji/go/codec/py_test.go delete mode 100644 vendor/github.com/ugorji/go/codec/rpc.go delete mode 100644 vendor/github.com/ugorji/go/codec/simple.go delete mode 100644 vendor/github.com/ugorji/go/codec/test-cbor-goldens.json delete mode 100755 vendor/github.com/ugorji/go/codec/test.py delete mode 100755 vendor/github.com/ugorji/go/codec/tests.sh delete mode 100644 vendor/github.com/ugorji/go/codec/time.go delete mode 100644 vendor/github.com/ugorji/go/codec/values_test.go delete mode 100644 vendor/github.com/ugorji/go/msgpack.org.md delete mode 100644 vendor/golang.org/x/crypto/README create mode 100644 vendor/golang.org/x/crypto/README.md create mode 100644 vendor/golang.org/x/crypto/curve25519/const_amd64.h create mode 100644 vendor/golang.org/x/crypto/ssh/streamlocal.go create mode 100644 
vendor/golang.org/x/net/context/go19.go create mode 100644 vendor/golang.org/x/net/context/pre_go19.go create mode 100644 vendor/golang.org/x/net/http2/ciphers.go create mode 100644 vendor/golang.org/x/net/http2/ciphers_test.go create mode 100644 vendor/golang.org/x/net/http2/databuffer.go create mode 100644 vendor/golang.org/x/net/http2/databuffer_test.go delete mode 100644 vendor/golang.org/x/net/http2/fixed_buffer.go delete mode 100644 vendor/golang.org/x/net/http2/fixed_buffer_test.go create mode 100644 vendor/golang.org/x/net/http2/go19.go create mode 100644 vendor/golang.org/x/net/http2/go19_test.go create mode 100644 vendor/golang.org/x/net/http2/hpack/tables_test.go create mode 100644 vendor/golang.org/x/net/http2/not_go19.go create mode 100644 vendor/golang.org/x/net/idna/example_test.go create mode 100644 vendor/golang.org/x/net/idna/tables.go create mode 100644 vendor/golang.org/x/net/idna/trie.go create mode 100644 vendor/golang.org/x/net/idna/trieval.go create mode 100644 vendor/golang.org/x/sys/windows/asm_windows_386.s create mode 100644 vendor/golang.org/x/sys/windows/asm_windows_amd64.s create mode 100644 vendor/golang.org/x/sys/windows/dll_windows.go create mode 100644 vendor/golang.org/x/sys/windows/env_unset.go create mode 100644 vendor/golang.org/x/sys/windows/env_windows.go create mode 100644 vendor/golang.org/x/sys/windows/eventlog.go create mode 100644 vendor/golang.org/x/sys/windows/exec_windows.go create mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go create mode 100644 vendor/golang.org/x/sys/windows/race.go create mode 100644 vendor/golang.org/x/sys/windows/race0.go create mode 100644 vendor/golang.org/x/sys/windows/security_windows.go create mode 100644 vendor/golang.org/x/sys/windows/service.go create mode 100644 vendor/golang.org/x/sys/windows/str.go create mode 100644 vendor/golang.org/x/sys/windows/syscall.go create mode 100644 vendor/golang.org/x/sys/windows/syscall_test.go create mode 100644 vendor/golang.org/x/sys/windows/syscall_windows.go create mode 100644 vendor/golang.org/x/sys/windows/syscall_windows_test.go create mode 100644 vendor/golang.org/x/sys/windows/zsyscall_windows.go create mode 100644 vendor/golang.org/x/sys/windows/ztypes_windows.go create mode 100644 vendor/golang.org/x/sys/windows/ztypes_windows_386.go create mode 100644 vendor/golang.org/x/sys/windows/ztypes_windows_amd64.go create mode 100644 vendor/golang.org/x/text/cases/icu.go create mode 100644 vendor/golang.org/x/text/cases/icu_test.go create mode 100644 vendor/golang.org/x/text/internal/gen.go create mode 100644 vendor/golang.org/x/text/internal/gen_test.go create mode 100644 vendor/golang.org/x/text/internal/internal.go create mode 100644 vendor/golang.org/x/text/internal/internal_test.go create mode 100644 vendor/golang.org/x/text/internal/match.go create mode 100644 vendor/golang.org/x/text/internal/match_test.go create mode 100644 vendor/golang.org/x/text/internal/tables.go rename vendor/golang.org/x/text/language/{maketables.go => gen.go} (91%) rename vendor/golang.org/x/text/secure/bidirule/{go1_7_test.go => bench_test.go} (75%) delete mode 100644 vendor/golang.org/x/text/secure/bidirule/go1_6_test.go delete mode 100644 vendor/golang.org/x/text/secure/precis/go1_6_test.go delete mode 100644 vendor/golang.org/x/text/secure/precis/go1_7_test.go create mode 100644 vendor/golang.org/x/text/secure/precis/profile_test.go create mode 100644 vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json create mode 100644 
vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go create mode 100644 vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-api.json create mode 100644 vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-gen.go create mode 100644 vendor/google.golang.org/api/compute/v0.alpha/compute-api.json create mode 100644 vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go create mode 100644 vendor/google.golang.org/api/compute/v0.beta/compute-api.json create mode 100644 vendor/google.golang.org/api/compute/v0.beta/compute-gen.go create mode 100644 vendor/google.golang.org/api/dns/v1/dns-api.json create mode 100644 vendor/google.golang.org/api/dns/v1/dns-gen.go create mode 100644 vendor/google.golang.org/api/gensupport/send_test.go create mode 100644 vendor/google.golang.org/api/logging/v2beta1/logging-api.json create mode 100644 vendor/google.golang.org/api/logging/v2beta1/logging-gen.go create mode 100644 vendor/google.golang.org/api/monitoring/v3/monitoring-api.json create mode 100644 vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go create mode 100644 vendor/google.golang.org/api/pubsub/v1/pubsub-api.json create mode 100644 vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go create mode 100644 vendor/google.golang.org/appengine/CONTRIBUTING.md create mode 100644 vendor/gopkg.in/gcfg.v1/errors.go rename vendor/{github.com/docker/engine-api/client/transport/cancellable => gopkg.in/warnings.v0}/LICENSE (83%) create mode 100644 vendor/gopkg.in/warnings.v0/README create mode 100644 vendor/gopkg.in/warnings.v0/warnings.go create mode 100644 vendor/gopkg.in/warnings.v0/warnings_test.go create mode 100644 vendor/k8s.io/api/OWNERS delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types.generated.go delete mode 100644 vendor/k8s.io/api/apps/v1beta1/types.generated.go rename vendor/k8s.io/{kubernetes/pkg/apis/policy => api/apps/v1beta2}/BUILD (67%) rename vendor/k8s.io/api/{autoscaling/v2alpha1 => apps/v1beta2}/doc.go (91%) create mode 100644 vendor/k8s.io/api/apps/v1beta2/generated.pb.go create mode 100644 vendor/k8s.io/api/apps/v1beta2/generated.proto rename vendor/k8s.io/{kubernetes/pkg/apis/apps => api/apps/v1beta2}/register.go (70%) create mode 100644 vendor/k8s.io/api/apps/v1beta2/types.go create mode 100644 vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/types.generated.go delete mode 100644 vendor/k8s.io/api/authorization/v1/types.generated.go delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/types.generated.go delete mode 100644 vendor/k8s.io/api/autoscaling/v1/types.generated.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2alpha1/BUILD delete mode 100644 vendor/k8s.io/api/autoscaling/v2alpha1/types.generated.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2alpha1/zz_generated.deepcopy.go rename vendor/k8s.io/{kubernetes/pkg/apis/autoscaling/v2alpha1 => api/autoscaling/v2beta1}/BUILD (73%) rename vendor/k8s.io/{kubernetes/pkg/apis/autoscaling => api/autoscaling/v2beta1}/doc.go (87%) rename vendor/k8s.io/api/autoscaling/{v2alpha1 => v2beta1}/generated.pb.go (90%) rename vendor/k8s.io/api/autoscaling/{v2alpha1 => v2beta1}/generated.proto (99%) rename vendor/k8s.io/api/autoscaling/{v2alpha1 => v2beta1}/register.go (97%) rename vendor/k8s.io/api/autoscaling/{v2alpha1 => v2beta1}/types.go (98%) rename vendor/k8s.io/api/autoscaling/{v2alpha1 => 
v2beta1}/types_swagger_doc_generated.go (99%) create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/batch/v1/types.generated.go rename vendor/k8s.io/{kubernetes/pkg/apis/authorization => api/batch/v1beta1}/BUILD (67%) rename vendor/k8s.io/{client-go/kubernetes/typed/autoscaling/v2alpha1/generated_expansion.go => api/batch/v1beta1/doc.go} (83%) create mode 100644 vendor/k8s.io/api/batch/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/generated.proto rename vendor/k8s.io/{kubernetes/pkg/apis/batch => api/batch/v1beta1}/register.go (69%) create mode 100644 vendor/k8s.io/api/batch/v1beta1/types.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/types.generated.go delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/types.generated.go delete mode 100644 vendor/k8s.io/api/core/v1/types.generated.go delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/types.generated.go delete mode 100644 vendor/k8s.io/api/kubernetes-sha delete mode 100644 vendor/k8s.io/api/networking/v1/types.generated.go delete mode 100644 vendor/k8s.io/api/policy/v1beta1/types.generated.go rename vendor/k8s.io/{kubernetes/pkg/apis/authentication => api/rbac/v1}/BUILD (73%) rename vendor/k8s.io/{kubernetes/pkg/apis/rbac => api/rbac/v1}/doc.go (85%) create mode 100644 vendor/k8s.io/api/rbac/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/rbac/v1/generated.proto rename vendor/k8s.io/{kubernetes/pkg/apis/rbac => api/rbac/v1}/register.go (74%) rename vendor/k8s.io/{kubernetes/pkg/apis/rbac => api/rbac/v1}/types.go (57%) create mode 100644 vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/rbac/v1alpha1/types.generated.go delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/types.generated.go rename vendor/k8s.io/{kubernetes/pkg/apis/settings => api/scheduling/v1alpha1}/BUILD (73%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset => api/scheduling/v1alpha1}/doc.go (78%) create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/generated.proto rename vendor/k8s.io/{kubernetes/pkg/apis/certificates => api/scheduling/v1alpha1}/register.go (68%) create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/storage/v1beta1/types.generated.go delete mode 100644 vendor/k8s.io/apiextensions-apiserver/kubernetes-sha create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion_test.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go create mode 100644 
vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal_test.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/features/BUILD create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go delete mode 100644 vendor/k8s.io/apimachinery/kubernetes-sha delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/default.go rename vendor/k8s.io/{kubernetes/pkg/apis/batch/v1 => apimachinery/pkg/apis/meta/internalversion}/BUILD (53%) create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go rename vendor/k8s.io/{kubernetes/pkg/apis/apps => apimachinery/pkg/apis/meta/internalversion}/doc.go (85%) create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register_test.go rename vendor/k8s.io/{kubernetes/pkg/apis/authentication/v1/conversion.go => apimachinery/pkg/apis/meta/internalversion/roundtrip_test.go} (72%) create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref_test.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/generated_expansion.go => apimachinery/pkg/apis/meta/v1alpha1/conversion.go} (62%) create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/unstructured/converter_test.go create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/openapi/BUILD delete mode 100644 vendor/k8s.io/apimachinery/pkg/openapi/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/until_test.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields_test.go delete mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go delete mode 100644 vendor/k8s.io/apiserver/kubernetes-sha create mode 100644 vendor/k8s.io/client-go/discovery/fake/discovery_test.go delete mode 100644 vendor/k8s.io/client-go/kubernetes-sha create mode 100644 vendor/k8s.io/client-go/kubernetes/import.go rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1 => client-go/kubernetes/typed/apps/v1beta2}/BUILD (78%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1 => client-go/kubernetes/typed/apps/v1beta2}/apps_client.go (59%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1 => client-go/kubernetes/typed/apps/v1beta2}/controllerrevision.go (79%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1 => client-go/kubernetes/typed/apps/v1beta2}/daemonset.go (76%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1 => 
client-go/kubernetes/typed/apps/v1beta2}/deployment.go (76%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1 => client-go/kubernetes/typed/apps/v1beta2}/doc.go (97%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/BUILD rename vendor/k8s.io/client-go/kubernetes/typed/{autoscaling/v2alpha1 => apps/v1beta2}/fake/doc.go (100%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/generated_expansion.go => client-go/kubernetes/typed/apps/v1beta2/fake/fake_scale.go} (83%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1 => client-go/kubernetes/typed/apps/v1beta2}/generated_expansion.go (88%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1 => client-go/kubernetes/typed/apps/v1beta2}/replicaset.go (75%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1 => client-go/kubernetes/typed/apps/v1beta2}/scale.go (93%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1 => client-go/kubernetes/typed/apps/v1beta2}/statefulset.go (64%) rename vendor/k8s.io/{kubernetes/pkg/apis/authorization/v1/defaults.go => client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go} (78%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/localsubjectaccessreview_expansion.go => client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go} (54%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/tokenreview.go => client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go} (52%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/selfsubjectaccessreview_expansion.go => client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go} (62%) rename vendor/k8s.io/{kubernetes/pkg/apis/authentication/v1/defaults.go => client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go} (78%) rename vendor/k8s.io/{kubernetes/pkg/apis/authentication/v1/zz_generated.defaults.go => client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go} (54%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/tokenreview.go => client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go} (52%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go => client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go} (59%) delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/BUILD delete mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/doc.go rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1 => client-go/kubernetes/typed/autoscaling/v2beta1}/BUILD (66%) rename vendor/k8s.io/client-go/kubernetes/typed/autoscaling/{v2alpha1 => v2beta1}/autoscaling_client.go (62%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1 => client-go/kubernetes/typed/autoscaling/v2beta1}/doc.go (97%) rename vendor/k8s.io/client-go/kubernetes/typed/autoscaling/{v2alpha1 => v2beta1}/fake/BUILD (63%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1 => client-go/kubernetes/typed/autoscaling/v2beta1/fake}/doc.go (89%) rename vendor/k8s.io/client-go/kubernetes/typed/autoscaling/{v2alpha1 => v2beta1}/fake/fake_autoscaling_client.go (75%) rename vendor/k8s.io/client-go/kubernetes/typed/autoscaling/{v2alpha1 => v2beta1}/fake/fake_horizontalpodautoscaler.go (55%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1 => client-go/kubernetes/typed/autoscaling/v2beta1}/generated_expansion.go (97%) rename vendor/k8s.io/client-go/kubernetes/typed/autoscaling/{v2alpha1 => v2beta1}/horizontalpodautoscaler.go (76%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1 => client-go/kubernetes/typed/batch/v1beta1}/BUILD (80%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1 => client-go/kubernetes/typed/batch/v1beta1}/batch_client.go (62%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1 => client-go/kubernetes/typed/batch/v1beta1}/cronjob.go (75%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/apps => client-go/kubernetes/typed/batch}/v1beta1/doc.go (100%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1 => client-go/kubernetes/typed/batch/v1beta1/fake}/BUILD (62%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1 => client-go/kubernetes/typed/batch/v1beta1/fake}/doc.go (89%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/tokenreview_expansion.go => client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go} (53%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1 => client-go/kubernetes/typed/batch/v1beta1}/generated_expansion.go (97%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1 => client-go/kubernetes/typed/rbac/v1}/BUILD (78%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1 => client-go/kubernetes/typed/rbac/v1}/clusterrole.go (67%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1 => client-go/kubernetes/typed/rbac/v1}/clusterrolebinding.go (65%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/authentication => client-go/kubernetes/typed/rbac}/v1/doc.go (100%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1 => client-go/kubernetes/typed/rbac/v1/fake}/BUILD (56%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1 => 
client-go/kubernetes/typed/rbac/v1/fake}/doc.go (89%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1 => client-go/kubernetes/typed/rbac/v1}/generated_expansion.go (97%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1 => client-go/kubernetes/typed/rbac/v1}/rbac_client.go (60%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1 => client-go/kubernetes/typed/rbac/v1}/role.go (69%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1 => client-go/kubernetes/typed/rbac/v1}/rolebinding.go (68%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1 => client-go/kubernetes/typed/scheduling/v1alpha1}/BUILD (73%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/admissionregistration => client-go/kubernetes/typed/scheduling}/v1alpha1/doc.go (100%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/BUILD create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go rename vendor/k8s.io/{kubernetes/pkg/apis/authorization/v1beta1/zz_generated.defaults.go => client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go} (52%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/settings => client-go/kubernetes/typed/scheduling}/v1alpha1/generated_expansion.go (93%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/settings_client.go => client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go} (61%) delete mode 100644 vendor/k8s.io/client-go/pkg/api/v1/ref/BUILD delete mode 100644 vendor/k8s.io/client-go/pkg/api/v1/ref/ref.go create mode 100644 vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/tools/cache/heap.go create mode 100644 vendor/k8s.io/client-go/tools/cache/heap_test.go create mode 100644 vendor/k8s.io/client-go/tools/cache/reflector_metrics.go rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/generated_expansion.go => client-go/tools/clientcmd/api/doc.go} (85%) rename vendor/k8s.io/{kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/generated_expansion.go => client-go/tools/clientcmd/api/v1/doc.go} (87%) create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/flag.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/overrides_test.go create mode 100644 vendor/k8s.io/client-go/tools/pager/BUILD create mode 100644 
vendor/k8s.io/client-go/tools/pager/pager.go create mode 100644 vendor/k8s.io/client-go/tools/pager/pager_test.go rename vendor/k8s.io/{kubernetes/pkg/api/v1/ref => client-go/tools/reference}/BUILD (89%) rename vendor/k8s.io/{kubernetes/pkg/api/v1/ref => client-go/tools/reference}/ref.go (96%) create mode 100644 vendor/k8s.io/client-go/transport/spdy/BUILD create mode 100644 vendor/k8s.io/client-go/transport/spdy/spdy.go create mode 100644 vendor/k8s.io/client-go/util/cert/pem_test.go rename vendor/k8s.io/{kubernetes/pkg/client => client-go/util}/retry/BUILD (92%) rename vendor/k8s.io/{kubernetes/pkg/client => client-go/util}/retry/OWNERS (100%) rename vendor/k8s.io/{kubernetes/pkg/client => client-go/util}/retry/util.go (100%) rename vendor/k8s.io/{kubernetes/pkg/client => client-go/util}/retry/util_test.go (100%) create mode 100644 vendor/k8s.io/kube-openapi/.gitignore create mode 100644 vendor/k8s.io/kube-openapi/.travis.yml rename vendor/{github.com/go-openapi/loads => k8s.io/kube-openapi}/LICENSE (100%) create mode 100755 vendor/k8s.io/kube-openapi/OWNERS create mode 100644 vendor/k8s.io/kube-openapi/README.md rename vendor/k8s.io/{apimachinery/pkg/openapi => kube-openapi/pkg/common}/common.go (98%) rename vendor/k8s.io/{kubernetes/pkg/apis/batch => kube-openapi/pkg/common}/doc.go (83%) delete mode 100644 vendor/k8s.io/kubernetes/.gazelcfg.json create mode 120000 vendor/k8s.io/kubernetes/.kazelcfg.json create mode 100644 vendor/k8s.io/kubernetes/CHANGELOG-1.8.md delete mode 100644 vendor/k8s.io/kubernetes/CHANGELOG.md create mode 100644 vendor/k8s.io/kubernetes/SUPPORT.md rename vendor/k8s.io/kubernetes/pkg/api/v1/{builder.go => register.go} (100%) delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/validation/schema.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/validation/schema_test.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD delete mode 100755 vendor/k8s.io/kubernetes/pkg/apis/apps/OWNERS delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/install/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/types.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/defaults_test.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go delete mode 100755 vendor/k8s.io/kubernetes/pkg/apis/authentication/OWNERS delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication/install/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication/install/install.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication/types.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/doc.go delete mode 
100644 vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication/zz_generated.deepcopy.go delete mode 100755 vendor/k8s.io/kubernetes/pkg/apis/authorization/OWNERS delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/install/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/BUILD delete mode 100755 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/OWNERS delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults_test.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.conversion.go delete mode 100644 
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/BUILD delete mode 100755 vendor/k8s.io/kubernetes/pkg/apis/batch/OWNERS delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/install/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/types.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v1/defaults_test.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v1/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults_test.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/certificates/BUILD delete mode 100755 vendor/k8s.io/kubernetes/pkg/apis/certificates/OWNERS delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/certificates/helpers.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/certificates/install/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/certificates/install/install.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/certificates/types.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/helpers.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/zz_generated.defaults.go delete mode 100644 
vendor/k8s.io/kubernetes/pkg/apis/certificates/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/extensions/install/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/extensions/install/install.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults_test.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.defaults.go delete mode 100755 vendor/k8s.io/kubernetes/pkg/apis/policy/OWNERS delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/install/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/types.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/BUILD delete mode 100755 vendor/k8s.io/kubernetes/pkg/apis/rbac/OWNERS delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/helpers.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/install/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/conversion_test.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/helpers.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/helpers.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/zz_generated.defaults.go delete mode 100644 
vendor/k8s.io/kubernetes/pkg/apis/rbac/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/settings/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/settings/install/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/settings/install/install.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/settings/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/settings/types.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/settings/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/storage/BUILD delete mode 100755 vendor/k8s.io/kubernetes/pkg/apis/storage/OWNERS delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/storage/install/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/storage/install/install.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/storage/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/storage/types.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/storage/v1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/storage/v1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/storage/v1/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/storage/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/kubernetes/pkg/capabilities/capabilities_test.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/clientset.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/import_known_versions.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1/admissionregistration_client.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1/externaladmissionhookconfiguration.go delete mode 100644 
vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1/initializerconfiguration.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/authentication_client.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/authentication_client.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/tokenreview_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/authorization_client.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/localsubjectaccessreview.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/selfsubjectaccessreview.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/subjectaccessreview.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/subjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/authorization_client.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/localsubjectaccessreview.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/selfsubjectaccessreview.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/subjectaccessreview.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/subjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/autoscaling_client.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/horizontalpodautoscaler.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/autoscaling_client.go delete mode 100644 
vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/horizontalpodautoscaler.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/batch_client.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/job.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/certificates_client.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/certificatesigningrequest.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/certificatesigningrequest_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/componentstatus.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/configmap.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/core_client.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/endpoints.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/event.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/event_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/limitrange.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/namespace.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/namespace_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/node.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/node_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/persistentvolume.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/persistentvolumeclaim.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/pod.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/pod_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/podtemplate.go delete mode 100644 
vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/replicationcontroller.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/resourcequota.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/secret.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/service.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/service_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/serviceaccount.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/deployment.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/deployment_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/extensions_client.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/ingress.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/podsecuritypolicy.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/scale.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/scale_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/thirdpartyresource.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/networking/v1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/networking/v1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/networking/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/networking/v1/networking_client.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/networking/v1/networkpolicy.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/eviction.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/eviction_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/poddisruptionbudget.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/policy_client.go delete mode 100644 
vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/clusterrole.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/clusterrolebinding.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/rbac_client.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/role.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/rolebinding.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/podpreset.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/storage_client.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/storageclass.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/storage_client.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/storageclass.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/types/BUILD rename vendor/k8s.io/kubernetes/pkg/{apis/authorization/v1beta1/conversion.go => kubelet/types/constants.go} (70%) rename vendor/k8s.io/kubernetes/pkg/{client/clientset_generated/clientset/typed/authorization/v1/generated_expansion.go => kubelet/types/doc.go} (80%) create mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/types/labels.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/types/labels_test.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update_test.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/types/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/types/types_test.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/BUILD delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/file/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/util/file/file.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/io/consistentread.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/io/io.go delete mode 100644 
vendor/k8s.io/kubernetes/pkg/util/io/io_test.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/mount/exec.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/mount/mount_windows.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/mount/mount_windows_test.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount_test.go rename vendor/k8s.io/kubernetes/pkg/util/{exec => pointer}/BUILD (72%) create mode 100644 vendor/k8s.io/kubernetes/pkg/util/pointer/pointer.go rename vendor/k8s.io/kubernetes/pkg/util/{util_test.go => pointer/pointer_test.go} (84%) create mode 100644 vendor/k8s.io/kubernetes/pkg/util/taints/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/util/taints/taints.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/taints/taints_test.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/template.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/template_test.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/umask.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/umask_windows.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/util.go create mode 100644 vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go create mode 100644 vendor/k8s.io/utils/.travis.yml create mode 100644 vendor/k8s.io/utils/LICENSE create mode 100644 vendor/k8s.io/utils/README.md create mode 100644 vendor/k8s.io/utils/code-of-conduct.md rename vendor/k8s.io/{kubernetes/pkg/util => utils}/exec/doc.go (86%) rename vendor/k8s.io/{kubernetes/pkg/util => utils}/exec/exec.go (94%) rename vendor/k8s.io/{kubernetes/pkg/util => utils}/exec/exec_test.go (98%) rename vendor/k8s.io/{kubernetes/pkg/util/exec => utils/exec/testing}/fake_exec.go (85%) diff --git a/glide.lock b/glide.lock index 837c012f5d7..4488c60d1b9 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 1334daf8f668df5298c2ebb2132e3e45def3f7fdf3f3e4558912a1133424fed4 -updated: 2017-09-27T12:40:51.281879295-04:00 +hash: 1978c56baf7e45d013c9a8396fff480532b09d0c495f1c7eea17d12e9459fdc1 +updated: 2017-10-27T22:02:49.265783375-04:00 imports: - name: cloud.google.com/go version: 3b1ae45394a234c385be014e9a488f2bb6eef821 @@ -7,7 +7,7 @@ imports: - compute/metadata - internal - name: github.com/aws/aws-sdk-go - version: 93c0610f5cfe455cdccfdbe17bb0276764e62f1f + version: ee1f179877b2daf2aaabf71fa900773bf8842253 subpackages: - aws - aws/awserr @@ -52,38 +52,32 @@ imports: - name: github.com/dgrijalva/jwt-go version: 01aeca54ebda6e0fbfafd0a524d234159c05ec20 - name: github.com/docker/distribution - version: cd27f179f2c10c5d300e6d09025b538c475b0d51 + version: edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c subpackages: - - digest + - digestset - reference - name: github.com/docker/docker version: 092cba3727bb9b4a2f0e922cd6c0f93ea270e363 subpackages: + - api/types + - api/types/blkiodev + - api/types/container + - api/types/filters + - api/types/mount + - api/types/network + - api/types/registry + - api/types/strslice + - api/types/swarm + - api/types/versions - pkg/mount -- name: github.com/docker/engine-api - version: dea108d3aa0c67d7162a3fd8aa65f38a430019fd - subpackages: - - client - - client/transport - - client/transport/cancellable - - types - - types/blkiodev - - types/container - - types/filters - - types/network - - types/reference - - types/registry - - types/strslice - - types/time - - types/versions - name: github.com/docker/go-connections - version: f549a9393d05688dff0992ef3efd8bbe6c628aeb + version: 3ede32e2033de7505e6500d6c868c2b9ed9f169d subpackages: - nat - sockets - tlsconfig - name: 
github.com/docker/go-units - version: e30f1e79f3cd72542f2026ceec18d3bd67ab859c + version: 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1 - name: github.com/docker/spdystream version: 449fdfce4d962303d702fec724ef0ad181c92528 subpackages: @@ -94,22 +88,16 @@ imports: - log - name: github.com/emicklei/go-restful-swagger12 version: dcef7f55730566d41eae5db10e7d6981829720f6 -- name: github.com/exponent-io/jsonpath - version: d6023ce2651d8eafb5c75bb0c7167536102ec9f5 - name: github.com/fsnotify/fsnotify version: f12c6236fe7b5cf6bcf30e5935d08cb079d78334 - name: github.com/ghodss/yaml version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee - name: github.com/go-ini/ini - version: 2e44421e256d82ebbf3d4d4fcabe8930b905eff3 -- name: github.com/go-openapi/analysis - version: b44dc874b601d9e4e2f6e19140e794ba24bead3b + version: c787282c39ac1fc618827141a1f762240def08a3 - name: github.com/go-openapi/jsonpointer version: 46af16f9f7b149af66e5d1bd010e3574dc06de98 - name: github.com/go-openapi/jsonreference version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272 -- name: github.com/go-openapi/loads - version: 18441dfa706d924a39a030ee2c3b1d8d81917b38 - name: github.com/go-openapi/spec version: 6aced65f8501fe1217321abf0749d354824ba2ff - name: github.com/go-openapi/swag @@ -120,7 +108,7 @@ imports: - proto - sortkeys - name: github.com/golang/glog - version: 44145f04b68cf362d9c4df2182967c2275eaefed + version: 23def4e6c14b4da8ac2ed8007337bc5eb5007998 - name: github.com/golang/groupcache version: 02826c3e79038b59d737d3b1c0a1d937f71a4433 subpackages: @@ -129,8 +117,20 @@ imports: version: 4bd1920723d7b7c925de087aa32e2187708897f7 subpackages: - proto + - ptypes + - ptypes/any + - ptypes/duration + - ptypes/timestamp +- name: github.com/google/btree + version: 7d79101e329e5a3adf994758c578dab82b90c017 - name: github.com/google/gofuzz version: 44d81051d367757e1c7c6a5a86423ece9afcf63c +- name: github.com/googleapis/gnostic + version: 0c5108395e2debce0d731cf0287ddf7242066aba + subpackages: + - OpenAPIv2 + - compiler + - extensions - name: github.com/gophercloud/gophercloud version: b4c2377fa77951a0e08163f52dc9b3e206355194 subpackages: @@ -163,6 +163,10 @@ imports: - openstack/networking/v2/ports - openstack/utils - pagination +- name: github.com/gregjones/httpcache + version: 787624de3eb7bd915c329cba748687a3b22666a6 + subpackages: + - diskcache - name: github.com/guelfey/go.dbus version: f6a3a2366cc39b8479cadc499d3c735fb10fbdda - name: github.com/hashicorp/golang-lru @@ -194,6 +198,8 @@ imports: version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 - name: github.com/jmespath/go-jmespath version: 3433f3ea46d9f8019119e7dd41274e112a2359a9 +- name: github.com/json-iterator/go + version: 36b14963da70d11297d313183d7e6388c8510e1e - name: github.com/juju/ratelimit version: 5b9ff866471762aa2ab2dced63c9fb6f53921342 - name: github.com/kr/fs @@ -248,12 +254,16 @@ imports: - matchers/support/goraph/node - matchers/support/goraph/util - types +- name: github.com/opencontainers/go-digest + version: a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb - name: github.com/pborman/uuid version: ca53cad383cad2479bbba7f7a1a05797ec1386e4 - name: github.com/pelletier/go-buffruneio version: df1e16fde7fc330a0ca68167c23bf7ed6ac31d6d - name: github.com/pelletier/go-toml version: 0049ab3dc4c4c70a9eee23087437b69c0dde2130 +- name: github.com/peterbourgon/diskv + version: 5f041e8faa004a95c88a202771f4cc3e991971e6 - name: github.com/pkg/errors version: a22138067af1c4942683050411a841ade67fe1eb - name: github.com/pkg/sftp @@ -263,9 +273,10 @@ imports: subpackages: - jsonrpc2 - name: 
github.com/prometheus/client_golang - version: e51041b3fa41cece0dca035740ba6411905be473 + version: e7e903064f5e9eb5da98208bae10b475d4db0f8c subpackages: - prometheus + - prometheus/promhttp - name: github.com/prometheus/client_model version: fa8ad6fec33561be4280a8f0514318c79d7f6cb6 subpackages: @@ -285,7 +296,7 @@ imports: - name: github.com/PuerkitoBio/urlesc version: 5bd2802263f21d8788851d5305584c82a5c75d7e - name: github.com/Sirupsen/logrus - version: 26709e2714106fb8ad40b773b711ebce25b78914 + version: 51fe59aca108dc5680109e7b2051cbdcfa5a253c - name: github.com/spf13/afero version: b28a7effac979219c2a2ed6205a4d70e4b1bcd02 subpackages: @@ -301,12 +312,8 @@ imports: version: 5ccb023bc27df288a957c5e994cd44fd19619465 - name: github.com/spf13/viper version: 7fb2782df3d83e0036cc89f461ed0422628776f4 -- name: github.com/ugorji/go - version: ded73eae5db7e7a0ef6f55aace87a2873c5d2b74 - subpackages: - - codec - name: golang.org/x/crypto - version: d172538b2cfce0c13cee31e647d0367aa8cd2486 + version: 81e90905daefcd6fd217b62423c0908922eadb30 subpackages: - curve25519 - ed25519 @@ -314,7 +321,7 @@ imports: - ssh - ssh/terminal - name: golang.org/x/net - version: f2499483f923065a842d38eb4c7f1927e6fc6e6d + version: 1c05540f6879653db88113bc4a2b70aec4bd491f subpackages: - context - context/ctxhttp @@ -333,10 +340,12 @@ imports: version: a2e06a18b0d52d8cb2010e04b372a1965d8e3439 subpackages: - unix + - windows - name: golang.org/x/text - version: 2910a502d2bf9e43193af9d68ca516529614eed3 + version: b19bf474d317b857955b12035d2c5acb57ce8b01 subpackages: - cases + - internal - internal/tag - language - runes @@ -347,15 +356,23 @@ imports: - unicode/norm - width - name: google.golang.org/api - version: e3824ed33c72bf7e81da0286772c34b987520914 + version: 98825bb0065da4054e5da6db34f5fc598e50bc24 subpackages: + - cloudkms/v1 + - cloudmonitoring/v2beta2 + - compute/v0.alpha + - compute/v0.beta - compute/v1 - container/v1 + - dns/v1 - gensupport - googleapi - googleapi/internal/uritemplates + - logging/v2beta1 + - monitoring/v3 + - pubsub/v1 - name: google.golang.org/appengine - version: 4f7eeb5305a4ba1966344836ba4af9996b7b4e05 + version: 24e4144ec923c2374f6b06610c0df16a9222c3d9 subpackages: - internal - internal/app_identity @@ -367,48 +384,55 @@ imports: - internal/urlfetch - urlfetch - name: gopkg.in/gcfg.v1 - version: 083575c3955c85df16fe9590cceab64d03f5eb6e + version: 298b7a6a3838f79debfaee8bd3bfb2b8d779e756 subpackages: - scanner - token - types - name: gopkg.in/inf.v0 version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 +- name: gopkg.in/warnings.v0 + version: 8a331561fe74dadba6edfc59f3be66c22c3b065d - name: gopkg.in/yaml.v2 version: 53feefa2559fb8dfa8d81baad31be332c97d6c77 - name: k8s.io/api - version: e24ed681f56165befbea50a24f7e9f1c079ac366 + version: 4df58c811fe2e65feb879227b2b245e4dc26e7ad subpackages: - admissionregistration/v1alpha1 - apps/v1beta1 + - apps/v1beta2 - authentication/v1 - authentication/v1beta1 - authorization/v1 - authorization/v1beta1 - autoscaling/v1 - - autoscaling/v2alpha1 + - autoscaling/v2beta1 - batch/v1 + - batch/v1beta1 - batch/v2alpha1 - certificates/v1beta1 - core/v1 - extensions/v1beta1 - networking/v1 - policy/v1beta1 + - rbac/v1 - rbac/v1alpha1 - rbac/v1beta1 + - scheduling/v1alpha1 - settings/v1alpha1 - storage/v1 - storage/v1beta1 - name: k8s.io/apiextensions-apiserver - version: be41f5093e2b05c7a0befe35b04b715eb325ab43 + version: e509bb64fe1116e12a32273a2032426aa1a5fd26 subpackages: - pkg/apis/apiextensions - pkg/apis/apiextensions/v1beta1 - pkg/client/clientset/clientset - 
pkg/client/clientset/clientset/scheme - pkg/client/clientset/clientset/typed/apiextensions/v1beta1 + - pkg/features - name: k8s.io/apimachinery - version: 728b986edb9bd4bb8a022a8cc13573a97ecc2239 + version: 019ae5ada31de202164b118aee88ee2d14075c31 subpackages: - pkg/api/equality - pkg/api/errors @@ -418,6 +442,7 @@ imports: - pkg/apimachinery - pkg/apimachinery/announced - pkg/apimachinery/registered + - pkg/apis/meta/internalversion - pkg/apis/meta/v1 - pkg/apis/meta/v1/unstructured - pkg/apis/meta/v1/validation @@ -427,7 +452,6 @@ imports: - pkg/conversion/unstructured - pkg/fields - pkg/labels - - pkg/openapi - pkg/runtime - pkg/runtime/schema - pkg/runtime/serializer @@ -465,7 +489,7 @@ imports: - third_party/forked/golang/netutil - third_party/forked/golang/reflect - name: k8s.io/apiserver - version: 149fc2228647cea28b0670c240ec582e985e8eda + version: 9707c008294d447053c0e2737c256ee23f99e5b4 subpackages: - pkg/authentication/authenticator - pkg/authentication/serviceaccount @@ -473,7 +497,7 @@ imports: - pkg/features - pkg/util/feature - name: k8s.io/client-go - version: 42a124578af9e61f5c6902fa7b6b2cb6538f17d2 + version: 35ccd4336052e7d73018b1382413534936f34eee subpackages: - discovery - discovery/fake @@ -484,6 +508,8 @@ imports: - kubernetes/typed/admissionregistration/v1alpha1/fake - kubernetes/typed/apps/v1beta1 - kubernetes/typed/apps/v1beta1/fake + - kubernetes/typed/apps/v1beta2 + - kubernetes/typed/apps/v1beta2/fake - kubernetes/typed/authentication/v1 - kubernetes/typed/authentication/v1/fake - kubernetes/typed/authentication/v1beta1 @@ -494,10 +520,12 @@ imports: - kubernetes/typed/authorization/v1beta1/fake - kubernetes/typed/autoscaling/v1 - kubernetes/typed/autoscaling/v1/fake - - kubernetes/typed/autoscaling/v2alpha1 - - kubernetes/typed/autoscaling/v2alpha1/fake + - kubernetes/typed/autoscaling/v2beta1 + - kubernetes/typed/autoscaling/v2beta1/fake - kubernetes/typed/batch/v1 - kubernetes/typed/batch/v1/fake + - kubernetes/typed/batch/v1beta1 + - kubernetes/typed/batch/v1beta1/fake - kubernetes/typed/batch/v2alpha1 - kubernetes/typed/batch/v2alpha1/fake - kubernetes/typed/certificates/v1beta1 @@ -510,18 +538,20 @@ imports: - kubernetes/typed/networking/v1/fake - kubernetes/typed/policy/v1beta1 - kubernetes/typed/policy/v1beta1/fake + - kubernetes/typed/rbac/v1 + - kubernetes/typed/rbac/v1/fake - kubernetes/typed/rbac/v1alpha1 - kubernetes/typed/rbac/v1alpha1/fake - kubernetes/typed/rbac/v1beta1 - kubernetes/typed/rbac/v1beta1/fake + - kubernetes/typed/scheduling/v1alpha1 + - kubernetes/typed/scheduling/v1alpha1/fake - kubernetes/typed/settings/v1alpha1 - kubernetes/typed/settings/v1alpha1/fake - kubernetes/typed/storage/v1 - kubernetes/typed/storage/v1/fake - kubernetes/typed/storage/v1beta1 - kubernetes/typed/storage/v1beta1/fake - - pkg/api/v1 - - pkg/api/v1/ref - pkg/version - plugin/pkg/client/auth/gcp - rest @@ -536,18 +566,26 @@ imports: - tools/clientcmd/api/latest - tools/clientcmd/api/v1 - tools/metrics + - tools/pager - tools/record + - tools/reference - tools/remotecommand - transport + - transport/spdy - util/cert - util/exec - util/flowcontrol - util/homedir - util/integer - util/jsonpath + - util/retry - util/testing +- name: k8s.io/kube-openapi + version: 868f2f29720b192240e18284659231b440f9cda5 + subpackages: + - pkg/common - name: k8s.io/kubernetes - version: df7f4b3526a580ef122aaeef61ab52842a60a88b + version: bdaeafa71f6c7c04636251031f93464384d54963 subpackages: - pkg/api - pkg/api/helper @@ -557,83 +595,21 @@ imports: - pkg/api/v1 - pkg/api/v1/helper 
- pkg/api/v1/pod - - pkg/api/v1/ref - pkg/api/v1/service - pkg/api/validation - - pkg/apis/apps - - pkg/apis/apps/install - - pkg/apis/apps/v1beta1 - - pkg/apis/authentication - - pkg/apis/authentication/install - - pkg/apis/authentication/v1 - - pkg/apis/authentication/v1beta1 - - pkg/apis/authorization - - pkg/apis/authorization/install - - pkg/apis/authorization/v1 - - pkg/apis/authorization/v1beta1 - - pkg/apis/autoscaling - - pkg/apis/autoscaling/install - - pkg/apis/autoscaling/v1 - - pkg/apis/autoscaling/v2alpha1 - - pkg/apis/batch - - pkg/apis/batch/install - - pkg/apis/batch/v1 - - pkg/apis/batch/v2alpha1 - - pkg/apis/certificates - - pkg/apis/certificates/install - - pkg/apis/certificates/v1beta1 - pkg/apis/extensions - - pkg/apis/extensions/install - - pkg/apis/extensions/v1beta1 - pkg/apis/networking - - pkg/apis/policy - - pkg/apis/policy/install - - pkg/apis/policy/v1beta1 - - pkg/apis/rbac - - pkg/apis/rbac/install - - pkg/apis/rbac/v1alpha1 - - pkg/apis/rbac/v1beta1 - - pkg/apis/settings - - pkg/apis/settings/install - - pkg/apis/settings/v1alpha1 - - pkg/apis/storage - - pkg/apis/storage/install - - pkg/apis/storage/v1 - - pkg/apis/storage/v1beta1 - pkg/capabilities - - pkg/client/clientset_generated/clientset - - pkg/client/clientset_generated/clientset/scheme - - pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1 - - pkg/client/clientset_generated/clientset/typed/apps/v1beta1 - - pkg/client/clientset_generated/clientset/typed/authentication/v1 - - pkg/client/clientset_generated/clientset/typed/authentication/v1beta1 - - pkg/client/clientset_generated/clientset/typed/authorization/v1 - - pkg/client/clientset_generated/clientset/typed/authorization/v1beta1 - - pkg/client/clientset_generated/clientset/typed/autoscaling/v1 - - pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1 - - pkg/client/clientset_generated/clientset/typed/batch/v1 - - pkg/client/clientset_generated/clientset/typed/batch/v2alpha1 - - pkg/client/clientset_generated/clientset/typed/certificates/v1beta1 - - pkg/client/clientset_generated/clientset/typed/core/v1 - - pkg/client/clientset_generated/clientset/typed/extensions/v1beta1 - - pkg/client/clientset_generated/clientset/typed/networking/v1 - - pkg/client/clientset_generated/clientset/typed/policy/v1beta1 - - pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1 - - pkg/client/clientset_generated/clientset/typed/rbac/v1beta1 - - pkg/client/clientset_generated/clientset/typed/settings/v1alpha1 - - pkg/client/clientset_generated/clientset/typed/storage/v1 - - pkg/client/clientset_generated/clientset/typed/storage/v1beta1 - - pkg/client/retry - pkg/cloudprovider - pkg/controller - pkg/credentialprovider - pkg/credentialprovider/aws - pkg/features - pkg/kubelet/apis + - pkg/kubelet/types - pkg/security/apparmor - pkg/serviceaccount - - pkg/util - - pkg/util/exec + - pkg/util/file - pkg/util/goroutinemap - pkg/util/goroutinemap/exponentialbackoff - pkg/util/hash @@ -641,9 +617,16 @@ imports: - pkg/util/mount - pkg/util/net/sets - pkg/util/parsers + - pkg/util/pointer + - pkg/util/taints - pkg/util/version - pkg/volume - pkg/volume/util +- name: k8s.io/utils + version: 9fdc871a36f37980dd85f96d576b20d564cc0784 + subpackages: + - exec + - exec/testing testImports: - name: github.com/pmezard/go-difflib version: d8ed2627bdf02c080bf22230dbb337003b7aba2d @@ -652,7 +635,7 @@ testImports: - name: github.com/stretchr/objx version: 1a9d0bb9f541897e62256577b352fdbc1fb4fd94 - name: github.com/stretchr/testify - version: 
890a5c3458b43e6104ff5da8dfa139d013d77544 + version: f6abca593680b2315d2075e0f5e2a9751e3f431a subpackages: - assert - mock diff --git a/glide.yaml b/glide.yaml index 8432d7fc6bf..e6f0f670eba 100644 --- a/glide.yaml +++ b/glide.yaml @@ -1,5 +1,6 @@ package: github.com/kubernetes-incubator/external-storage excludeDirs: +- docs/demo/hostpath-provisioner - nfs/test/e2e - repo-infra import: @@ -13,18 +14,20 @@ import: - package: github.com/guelfey/go.dbus - package: github.com/mitchellh/mapstructure version: db1efb5 +- package: gopkg.in/gcfg.v1 + version: v1.2.1 - package: k8s.io/apiserver - version: 149fc2228647cea28b0670c240ec582e985e8eda + version: kubernetes-1.8.2 - package: k8s.io/apiextensions-apiserver - version: be41f5093e2b05c7a0befe35b04b715eb325ab43 + version: kubernetes-1.8.2 - package: k8s.io/apimachinery - version: 728b986edb9bd4bb8a022a8cc13573a97ecc2239 + version: kubernetes-1.8.2 - package: k8s.io/client-go - version: 42a124578af9e61f5c6902fa7b6b2cb6538f17d2 + version: kubernetes-1.8.2 - package: k8s.io/api - version: e24ed681f56165befbea50a24f7e9f1c079ac366 + version: kubernetes-1.8.2 - package: k8s.io/kubernetes - version: df7f4b3526a580ef122aaeef61ab52842a60a88b + version: v1.8.2 - package: github.com/heketi/heketi version: 300752517908f3456e90d1ee2a773e7dd8ce1afe subpackages: diff --git a/gluster/glusterfs/pkg/volume/exec.go b/gluster/glusterfs/pkg/volume/exec.go index 6a1a9da8fda..18adb041ff0 100644 --- a/gluster/glusterfs/pkg/volume/exec.go +++ b/gluster/glusterfs/pkg/volume/exec.go @@ -23,7 +23,6 @@ import ( "github.com/golang/glog" "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - apiRemotecommand "k8s.io/apimachinery/pkg/util/remotecommand" "k8s.io/client-go/tools/remotecommand" ) @@ -64,7 +63,7 @@ func (p *glusterfsProvisioner) ExecuteCommand( req.Param("command", c) } - exec, err := remotecommand.NewExecutor(p.config, "POST", req.URL()) + exec, err := remotecommand.NewSPDYExecutor(p.config, "POST", req.URL()) if err != nil { glog.Fatalf("Failed to create NewExecutor: %v", err) return err @@ -74,10 +73,9 @@ func (p *glusterfsProvisioner) ExecuteCommand( var berr bytes.Buffer err = exec.Stream(remotecommand.StreamOptions{ - SupportedProtocols: apiRemotecommand.SupportedStreamingProtocols, - Stdout: &b, - Stderr: &berr, - Tty: false, + Stdout: &b, + Stderr: &berr, + Tty: false, }) glog.Infof("Result: %v", b.String()) diff --git a/lib/controller/controller.go b/lib/controller/controller.go index f4bff76e9f1..8d4bd6d6cdb 100644 --- a/lib/controller/controller.go +++ b/lib/controller/controller.go @@ -38,9 +38,9 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/pkg/api/v1/ref" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" + ref "k8s.io/client-go/tools/reference" "k8s.io/kubernetes/pkg/api/v1/helper" "k8s.io/kubernetes/pkg/util/goroutinemap" utilversion "k8s.io/kubernetes/pkg/util/version" @@ -410,7 +410,7 @@ func (ctrl *ProvisionController) Run(stopCh <-chan struct{}) { ctrl.hasRunLock.Unlock() go ctrl.claimController.Run(stopCh) go ctrl.volumeController.Run(stopCh) - go ctrl.classReflector.RunUntil(stopCh) + go ctrl.classReflector.Run(stopCh) <-stopCh } diff --git a/lib/controller/controller_test.go b/lib/controller/controller_test.go index 241e0e31900..a6bff8cddce 100644 --- a/lib/controller/controller_test.go +++ b/lib/controller/controller_test.go @@ -38,9 +38,9 @@ import ( "k8s.io/client-go/kubernetes/fake" 
"k8s.io/client-go/kubernetes/scheme" fakev1core "k8s.io/client-go/kubernetes/typed/core/v1/fake" - "k8s.io/client-go/pkg/api/v1/ref" testclient "k8s.io/client-go/testing" fcache "k8s.io/client-go/tools/cache/testing" + ref "k8s.io/client-go/tools/reference" ) const ( diff --git a/snapshot/pkg/cloudprovider/providers/openstack/metadata.go b/snapshot/pkg/cloudprovider/providers/openstack/metadata.go index 077f5026f0a..ae914ac8e1f 100644 --- a/snapshot/pkg/cloudprovider/providers/openstack/metadata.go +++ b/snapshot/pkg/cloudprovider/providers/openstack/metadata.go @@ -28,8 +28,8 @@ import ( "strings" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/utils/exec" ) // metadataURL is URL to OpenStack metadata server. It's hardcoded IPv4 diff --git a/snapshot/pkg/controller/snapshotter/snapshotter.go b/snapshot/pkg/controller/snapshotter/snapshotter.go index 400f3b2eb3e..487e6d5f178 100644 --- a/snapshot/pkg/controller/snapshotter/snapshotter.go +++ b/snapshot/pkg/controller/snapshotter/snapshotter.go @@ -764,7 +764,7 @@ func (vs *volumeSnapshotter) UpdateVolumeSnapshotStatus(snapshot *crdv1.VolumeSn condition.Status == oldCondition.Status && condition.Reason == oldCondition.Reason && condition.Message == oldCondition.Message && - condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime) + condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime) } if !isEqual { diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml index 2d8c086617e..c3af3ce27cd 100644 --- a/vendor/github.com/Sirupsen/logrus/.travis.yml +++ b/vendor/github.com/Sirupsen/logrus/.travis.yml @@ -2,7 +2,9 @@ language: go go: - 1.2 - 1.3 - - 1.4 - tip install: - - go get -t ./... + - go get github.com/stretchr/testify + - go get github.com/stvp/go-udp-testing + - go get github.com/tobi/airbrake-go + - go get github.com/getsentry/raven-go diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index eb72bff93b7..00000000000 --- a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,7 +0,0 @@ -# 0.7.3 - -formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md index d55f9092479..b6aa84c987f 100644 --- a/vendor/github.com/Sirupsen/logrus/README.md +++ b/vendor/github.com/Sirupsen/logrus/README.md @@ -1,11 +1,10 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc] +# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) Logrus is a structured logger for Go (golang), completely API compatible with the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0). Logrus itself is completely stable and has been used in -many large deployments. 
The core API is unlikely to change much but please -version control your Logrus to make sure you aren't fetching latest `master` on -every build.** +yet stable (pre 1.0), the core API is unlikely change much but please version +control your Logrus to make sure you aren't fetching latest `master` on every +build.** Nicely color-coded in development (when a TTY is attached, otherwise just plain text): @@ -34,16 +33,14 @@ ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: +[l2met](http://r.32k.io/l2met-introduction) format: ```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 +time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10 +time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122 +time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10 +time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9 +time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100 ``` #### Example @@ -84,7 +81,7 @@ func init() { // Use the Airbrake hook to report errors that have Error severity or above to // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) + log.AddHook(&logrus_airbrake.AirbrakeHook{}) // Output to stderr instead of stdout, could also be a file. log.SetOutput(os.Stderr) @@ -108,16 +105,6 @@ func main() { "omg": true, "number": 100, }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") } ``` @@ -176,8 +163,43 @@ You can add hooks for logging levels. For example to send errors to an exception tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to multiple places simultaneously, e.g. syslog. -Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in -`init`: +```go +// Not the real implementation of the Airbrake hook. Just a simple sample. +import ( + log "github.com/Sirupsen/logrus" +) + +func init() { + log.AddHook(new(AirbrakeHook)) +} + +type AirbrakeHook struct{} + +// `Fire()` takes the entry that the hook is fired for. 
`entry.Data[]` contains +// the fields for the entry. See the Fields section of the README. +func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error { + err := airbrake.Notify(entry.Data["error"].(error)) + if err != nil { + log.WithFields(log.Fields{ + "source": "airbrake", + "endpoint": airbrake.Endpoint, + }).Info("Failed to send error to Airbrake") + } + + return nil +} + +// `Levels()` returns a slice of `Levels` the hook is fired for. +func (hook *AirbrakeHook) Levels() []log.Level { + return []log.Level{ + log.ErrorLevel, + log.FatalLevel, + log.PanicLevel, + } +} +``` + +Logrus comes with built-in hooks. Add those, or your custom hook, in `init`: ```go import ( @@ -188,7 +210,7 @@ import ( ) func init() { - log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) + log.AddHook(new(logrus_airbrake.AirbrakeHook)) hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") if err != nil { @@ -199,18 +221,22 @@ func init() { } ``` +* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) + Send errors to an exception tracking service compatible with the Airbrake API. + Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. -| Hook | Description | -| ----- | ----------- | -| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. | -| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) | +* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) + Send errors to the Papertrail hosted logging service via UDP. + +* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) + Send errors to remote syslog server. + Uses standard library `log/syslog` behind the scenes. + +* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus) + Send errors to a channel in hipchat. + +* [`github.com/sebest/logrusly`](https://github.com/sebest/logrusly) + Send logs to Loggly (https://www.loggly.com/) #### Level logging @@ -288,15 +314,10 @@ The built-in logging formatters are: field to `true`. To force no colored output even if there is a TTY set the `DisableColors` field to `true` * `logrus.JSONFormatter`. Logs fields as JSON. -* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net). 
- - ```go - logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: “application_name"}) - ``` Third party logging formatters: -* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. +* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. You can define your formatter by implementing the `Formatter` interface, requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a @@ -321,28 +342,10 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { } ``` -#### Logger as an `io.Writer` - -Logrus can be transormed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. - ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - #### Rotation Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log +external program (like `logrotated(8)`) that can compress and delete old log entries. It should not be a feature of the application-level logger. diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go index 17fe6f707bc..e164eecb5f3 100644 --- a/vendor/github.com/Sirupsen/logrus/entry.go +++ b/vendor/github.com/Sirupsen/logrus/entry.go @@ -126,10 +126,6 @@ func (entry *Entry) Warn(args ...interface{}) { } } -func (entry *Entry) Warning(args ...interface{}) { - entry.Warn(args...) -} - func (entry *Entry) Error(args ...interface{}) { if entry.Logger.Level >= ErrorLevel { entry.log(ErrorLevel, fmt.Sprint(args...)) diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go index a67e1b802d9..d0871244814 100644 --- a/vendor/github.com/Sirupsen/logrus/exported.go +++ b/vendor/github.com/Sirupsen/logrus/exported.go @@ -9,10 +9,6 @@ var ( std = New() ) -func StandardLogger() *Logger { - return std -} - // SetOutput sets the standard logger output. func SetOutput(out io.Writer) { std.mu.Lock() @@ -36,8 +32,6 @@ func SetLevel(level Level) { // GetLevel returns the standard logger level. func GetLevel() Level { - std.mu.Lock() - defer std.mu.Unlock() return std.Level } diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go index 104d689f187..038ce9fd297 100644 --- a/vendor/github.com/Sirupsen/logrus/formatter.go +++ b/vendor/github.com/Sirupsen/logrus/formatter.go @@ -1,9 +1,5 @@ package logrus -import "time" - -const DefaultTimestampFormat = time.RFC3339 - // The Formatter interface is used to implement a custom Formatter. It takes an // `Entry`. It exposes all the fields, including the default ones: // diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go index dcc4f1d9fd7..b09227c2b5d 100644 --- a/vendor/github.com/Sirupsen/logrus/json_formatter.go +++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go @@ -3,32 +3,18 @@ package logrus import ( "encoding/json" "fmt" + "time" ) -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. 
- TimestampFormat string -} +type JSONFormatter struct{} func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { data := make(Fields, len(entry.Data)+3) for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/Sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } + data[k] = v } prefixFieldClashes(data) - - if f.TimestampFormat == "" { - f.TimestampFormat = DefaultTimestampFormat - } - - data["time"] = entry.Time.Format(f.TimestampFormat) + data["time"] = entry.Time.Format(time.RFC3339) data["msg"] = entry.Message data["level"] = entry.Level.String() diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter_test.go b/vendor/github.com/Sirupsen/logrus/json_formatter_test.go deleted file mode 100644 index 1d70873254d..00000000000 --- a/vendor/github.com/Sirupsen/logrus/json_formatter_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package logrus - -import ( - "encoding/json" - "errors" - - "testing" -) - -func TestErrorNotLost(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["error"] != "wild walrus" { - t.Fatal("Error field not set") - } -} - -func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["omg"] != "wild walrus" { - t.Fatal("Error field not set") - } -} - -func TestFieldClashWithTime(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("time", "right now!")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.time"] != "right now!" 
{ - t.Fatal("fields.time not set to original time field") - } - - if entry["time"] != "0001-01-01T00:00:00Z" { - t.Fatal("time field not set to current time, was: ", entry["time"]) - } -} - -func TestFieldClashWithMsg(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("msg", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.msg"] != "something" { - t.Fatal("fields.msg not set to original msg field") - } -} - -func TestFieldClashWithLevel(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.level"] != "something" { - t.Fatal("fields.level not set to original level field") - } -} - -func TestJSONEntryEndsWithNewline(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - if b[len(b)-1] != '\n' { - t.Fatal("Expected JSON log entry to end with a newline") - } -} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go index da928a37509..b392e547a7b 100644 --- a/vendor/github.com/Sirupsen/logrus/logger.go +++ b/vendor/github.com/Sirupsen/logrus/logger.go @@ -65,15 +65,11 @@ func (logger *Logger) WithFields(fields Fields) *Entry { } func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugf(format, args...) - } + NewEntry(logger).Debugf(format, args...) } func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infof(format, args...) - } + NewEntry(logger).Infof(format, args...) } func (logger *Logger) Printf(format string, args ...interface{}) { @@ -81,45 +77,31 @@ func (logger *Logger) Printf(format string, args ...interface{}) { } func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } + NewEntry(logger).Warnf(format, args...) } func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } + NewEntry(logger).Warnf(format, args...) } func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorf(format, args...) - } + NewEntry(logger).Errorf(format, args...) } func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalf(format, args...) - } + NewEntry(logger).Fatalf(format, args...) } func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicf(format, args...) - } + NewEntry(logger).Panicf(format, args...) } func (logger *Logger) Debug(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debug(args...) - } + NewEntry(logger).Debug(args...) } func (logger *Logger) Info(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Info(args...) 
- } + NewEntry(logger).Info(args...) } func (logger *Logger) Print(args ...interface{}) { @@ -127,45 +109,31 @@ func (logger *Logger) Print(args ...interface{}) { } func (logger *Logger) Warn(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } + NewEntry(logger).Warn(args...) } func (logger *Logger) Warning(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } + NewEntry(logger).Warn(args...) } func (logger *Logger) Error(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Error(args...) - } + NewEntry(logger).Error(args...) } func (logger *Logger) Fatal(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatal(args...) - } + NewEntry(logger).Fatal(args...) } func (logger *Logger) Panic(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panic(args...) - } + NewEntry(logger).Panic(args...) } func (logger *Logger) Debugln(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugln(args...) - } + NewEntry(logger).Debugln(args...) } func (logger *Logger) Infoln(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infoln(args...) - } + NewEntry(logger).Infoln(args...) } func (logger *Logger) Println(args ...interface{}) { @@ -173,31 +141,21 @@ func (logger *Logger) Println(args ...interface{}) { } func (logger *Logger) Warnln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) - } + NewEntry(logger).Warnln(args...) } func (logger *Logger) Warningln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) - } + NewEntry(logger).Warnln(args...) } func (logger *Logger) Errorln(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorln(args...) - } + NewEntry(logger).Errorln(args...) } func (logger *Logger) Fatalln(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalln(args...) - } + NewEntry(logger).Fatalln(args...) } func (logger *Logger) Panicln(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicln(args...) - } + NewEntry(logger).Panicln(args...) 
} diff --git a/vendor/github.com/Sirupsen/logrus/logrus_test.go b/vendor/github.com/Sirupsen/logrus/logrus_test.go index efaacea2361..53025425816 100644 --- a/vendor/github.com/Sirupsen/logrus/logrus_test.go +++ b/vendor/github.com/Sirupsen/logrus/logrus_test.go @@ -5,7 +5,6 @@ import ( "encoding/json" "strconv" "strings" - "sync" "testing" "github.com/stretchr/testify/assert" @@ -191,7 +190,7 @@ func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { log.WithField("level", 1).Info("test") }, func(fields Fields) { assert.Equal(t, fields["level"], "info") - assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only + assert.Equal(t, fields["fields.level"], 1) }) } @@ -224,7 +223,7 @@ func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { err := json.Unmarshal(buffer.Bytes(), &fields) assert.NoError(t, err, "should have decoded first message") - assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Len(t, fields, 4, "should only have msg/time/level/context fields") assert.Equal(t, fields["msg"], "looks delicious") assert.Equal(t, fields["context"], "eating raw fish") @@ -234,7 +233,7 @@ func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { err = json.Unmarshal(buffer.Bytes(), &fields) assert.NoError(t, err, "should have decoded second message") - assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Len(t, fields, 4, "should only have msg/time/level/context fields") assert.Equal(t, fields["msg"], "omg it is!") assert.Equal(t, fields["context"], "eating raw fish") assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") @@ -282,20 +281,3 @@ func TestParseLevel(t *testing.T) { l, err = ParseLevel("invalid") assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) } - -func TestGetSetLevelRace(t *testing.T) { - wg := sync.WaitGroup{} - for i := 0; i < 100; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - if i%2 == 0 { - SetLevel(InfoLevel) - } else { - GetLevel() - } - }(i) - - } - wg.Wait() -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go index b8bebc13eea..276447bd5c0 100644 --- a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go +++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -3,7 +3,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build linux darwin freebsd openbsd +// +build linux,!appengine darwin freebsd package logrus diff --git a/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go b/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go deleted file mode 100644 index af609a53d64..00000000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go +++ /dev/null @@ -1,7 +0,0 @@ -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go index 612417ff9c8..78e78893568 100644 --- a/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -3,6 +3,7 @@ package logrus import ( "bytes" "fmt" + "regexp" "sort" "strings" "time" @@ -14,12 +15,12 @@ const ( green = 32 yellow = 33 blue = 34 - gray = 37 ) var ( baseTimestamp time.Time isTerminal bool + noQuoteNeeded *regexp.Regexp ) func init() { @@ -33,37 +34,20 @@ func miniTS() int { type TextFormatter struct { // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - - // Force disabling colors. + ForceColors bool DisableColors bool - - // Disable timestamp logging. useful when output is redirected to logging - // system that already adds timestamps. + // Set to true to disable timestamp logging (useful when the output + // is redirected to a logging system already adding a timestamp) DisableTimestamp bool - - // Enable logging the full timestamp when a TTY is attached instead of just - // the time passed since beginning of execution. - FullTimestamp bool - - // TimestampFormat to use for display when a full timestamp is printed - TimestampFormat string - - // The fields are sorted by default for a consistent output. For applications - // that log extremely frequently and don't use the JSON formatter this may not - // be desired. 
- DisableSorting bool } func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - var keys []string = make([]string, 0, len(entry.Data)) + + var keys []string for k := range entry.Data { keys = append(keys, k) } - - if !f.DisableSorting { - sort.Strings(keys) - } + sort.Strings(keys) b := &bytes.Buffer{} @@ -71,14 +55,11 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { isColored := (f.ForceColors || isTerminal) && !f.DisableColors - if f.TimestampFormat == "" { - f.TimestampFormat = DefaultTimestampFormat - } if isColored { - f.printColored(b, entry, keys) + printColored(b, entry, keys) } else { if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(f.TimestampFormat)) + f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339)) } f.appendKeyValue(b, "level", entry.Level.String()) f.appendKeyValue(b, "msg", entry.Message) @@ -91,11 +72,9 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { return b.Bytes(), nil } -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string) { +func printColored(b *bytes.Buffer, entry *Entry, keys []string) { var levelColor int switch entry.Level { - case DebugLevel: - levelColor = gray case WarnLevel: levelColor = yellow case ErrorLevel, FatalLevel, PanicLevel: @@ -106,11 +85,7 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin levelText := strings.ToUpper(entry.Level.String())[0:4] - if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) - } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message) - } + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) for _, k := range keys { v := entry.Data[k] fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v) @@ -121,7 +96,7 @@ func needsQuoting(text string) bool { for _, ch := range text { if !((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || + (ch >= '0' && ch < '9') || ch == '-' || ch == '.') { return false } diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter_test.go b/vendor/github.com/Sirupsen/logrus/text_formatter_test.go index e25a44f67bf..f604f1b0026 100644 --- a/vendor/github.com/Sirupsen/logrus/text_formatter_test.go +++ b/vendor/github.com/Sirupsen/logrus/text_formatter_test.go @@ -3,8 +3,8 @@ package logrus import ( "bytes" "errors" + "testing" - "time" ) func TestQuoting(t *testing.T) { @@ -25,37 +25,9 @@ func TestQuoting(t *testing.T) { checkQuoting(false, "abcd") checkQuoting(false, "v1.0") - checkQuoting(false, "1234567890") checkQuoting(true, "/foobar") checkQuoting(true, "x y") checkQuoting(true, "x,y") checkQuoting(false, errors.New("invalid")) checkQuoting(true, errors.New("invalid argument")) } - -func TestTimestampFormat(t *testing.T) { - checkTimeStr := func(format string) { - customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} - customStr, _ := customFormatter.Format(WithField("test", "test")) - timeStart := bytes.Index(customStr, ([]byte)("time=")) - timeEnd := bytes.Index(customStr, ([]byte)("level=")) - timeStr := customStr[timeStart+5 : timeEnd-1] - if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' { - timeStr = timeStr[1 : len(timeStr)-1] - } - if format == "" { - format = time.RFC3339 - } - _, e := time.Parse(format, (string)(timeStr)) - if e != nil { - t.Errorf("time string \"%s\" did not match provided time 
format \"%s\": %s", timeStr, format, e) - } - } - - checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") - checkTimeStr("Mon Jan _2 15:04:05 2006") - checkTimeStr("") -} - -// TODO add tests for sorting etc., this requires a parser for the text -// formatter output. diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go deleted file mode 100644 index 1e30b1c753a..00000000000 --- a/vendor/github.com/Sirupsen/logrus/writer.go +++ /dev/null @@ -1,31 +0,0 @@ -package logrus - -import ( - "bufio" - "io" - "runtime" -) - -func (logger *Logger) Writer() *io.PipeWriter { - reader, writer := io.Pipe() - - go logger.writerScanner(reader) - runtime.SetFinalizer(writer, writerFinalizer) - - return writer -} - -func (logger *Logger) writerScanner(reader *io.PipeReader) { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - logger.Print(scanner.Text()) - } - if err := scanner.Err(); err != nil { - logger.Errorf("Error while reading from Writer: %s", err) - } - reader.Close() -} - -func writerFinalizer(writer *io.PipeWriter) { - writer.Close() -} diff --git a/vendor/github.com/aws/aws-sdk-go/.travis.yml b/vendor/github.com/aws/aws-sdk-go/.travis.yml index 130d91151f1..4b861364bbb 100644 --- a/vendor/github.com/aws/aws-sdk-go/.travis.yml +++ b/vendor/github.com/aws/aws-sdk-go/.travis.yml @@ -3,11 +3,11 @@ language: go sudo: required go: - - 1.5 - - 1.6 - - 1.7 - - 1.8 - - 1.9 + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x - tip # Use Go 1.5's vendoring experiment for 1.5 tests. @@ -23,3 +23,7 @@ script: matrix: allow_failures: - go: tip + +branches: + only: + - master diff --git a/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md index a404db8cbea..ce37d3aadd4 100644 --- a/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md @@ -1,3 +1,192 @@ +Release v1.12.19 (2017-10-26) +=== + +### Service Client Updates +* `aws/endpoints`: Updated Regions and Endpoints metadata. + +Release v1.12.18 (2017-10-26) +=== + +### Service Client Updates +* `service/cloudfront`: Updates service API and documentation + * You can now specify additional options for MinimumProtocolVersion, which controls the SSL/TLS protocol that CloudFront uses to communicate with viewers. The minimum protocol version that you choose also determines the ciphers that CloudFront uses to encrypt the content that it returns to viewers. +* `service/ec2`: Updates service API + * You are now able to create and launch EC2 P3 instance, next generation GPU instances, optimized for machine learning and high performance computing applications. With up to eight NVIDIA Tesla V100 GPUs, P3 instances provide up to one petaflop of mixed-precision, 125 teraflops of single-precision, and 62 teraflops of double-precision floating point performance, as well as a 300 GB/s second-generation NVLink interconnect that enables high-speed, low-latency GPU-to-GPU communication. P3 instances also feature up to 64 vCPUs based on custom Intel Xeon E5 (Broadwell) processors, 488 GB of DRAM, and 25 Gbps of dedicated aggregate network bandwidth using the Elastic Network Adapter (ENA). +* `aws/endpoints`: Updated Regions and Endpoints metadata. 
+ +Release v1.12.17 (2017-10-24) +=== + +### Service Client Updates +* `service/config`: Updates service API +* `service/elasticache`: Updates service API, documentation, and examples + * Amazon ElastiCache for Redis today announced support for data encryption both for data in-transit and data at-rest. The new encryption in-transit functionality enables ElastiCache for Redis customers to encrypt data for all communication between clients and Redis engine, and all intra-cluster Redis communication. The encryption at-rest functionality allows customers to encrypt their S3 based backups. Customers can begin using the new functionality by simply enabling this functionality via AWS console, and a small configuration change in their Redis clients. The ElastiCache for Redis service automatically manages life cycle of the certificates required for encryption, including the issuance, renewal and expiration of certificates. Additionally, as part of this launch, customers will gain the ability to start using the Redis AUTH command that provides an added level of authentication. +* `service/glue`: Adds new service + * AWS Glue: Adding a new API, BatchStopJobRun, to stop one or more job runs for a specified Job. +* `service/pinpoint`: Updates service API and documentation + * Added support for APNs VoIP messages. Added support for collapsible IDs, message priority, and TTL for APNs and FCM/GCM. + +Release v1.12.16 (2017-10-23) +=== + +### Service Client Updates +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/organizations`: Updates service API and documentation + * This release supports integrating other AWS services with AWS Organizations through the use of an IAM service-linked role called AWSServiceRoleForOrganizations. Certain operations automatically create that role if it does not already exist. + +Release v1.12.15 (2017-10-20) +=== + +### Service Client Updates +* `service/ec2`: Updates service API and documentation + * Adding pagination support for DescribeSecurityGroups for EC2 Classic and VPC Security Groups + +Release v1.12.14 (2017-10-19) +=== + +### Service Client Updates +* `service/sqs`: Updates service API and documentation + * Added support for tracking cost allocation by adding, updating, removing, and listing the metadata tags of Amazon SQS queues. +* `service/ssm`: Updates service API and documentation + * EC2 Systems Manager versioning support for Parameter Store. Also support for referencing parameter versions in SSM Documents. + +Release v1.12.13 (2017-10-18) +=== + +### Service Client Updates +* `service/lightsail`: Updates service API and documentation + * This release adds support for Windows Server-based Lightsail instances. The GetInstanceAccessDetails API now returns the password of your Windows Server-based instance when using the default key pair. GetInstanceAccessDetails also returns a PasswordData object for Windows Server instances containing the ciphertext and keyPairName. The Blueprint data type now includes a list of platform values (LINUX_UNIX or WINDOWS). The Bundle data type now includes a list of SupportedPlatforms values (LINUX_UNIX or WINDOWS). + +Release v1.12.12 (2017-10-17) +=== + +### Service Client Updates +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/es`: Updates service API and documentation + * This release adds support for VPC access to Amazon Elasticsearch Service. + * This release adds support for VPC access to Amazon Elasticsearch Service. 
+ +Release v1.12.11 (2017-10-16) +=== + +### Service Client Updates +* `service/cloudhsm`: Updates service API and documentation + * Documentation updates for AWS CloudHSM Classic. +* `service/ec2`: Updates service API and documentation + * You can now change the tenancy of your VPC from dedicated to default with a single API operation. For more details refer to the documentation for changing VPC tenancy. +* `service/es`: Updates service API and documentation + * AWS Elasticsearch adds support for enabling slow log publishing. Using slow log publishing options customers can configure and enable index/query slow log publishing of their domain to preferred AWS Cloudwatch log group. +* `service/rds`: Updates service API and waiters + * Adds waiters for DBSnapshotAvailable and DBSnapshotDeleted. +* `service/waf`: Updates service API and documentation + * This release adds support for regular expressions as match conditions in rules, and support for geographical location by country of request IP address as a match condition in rules. +* `service/waf-regional`: Updates service API and documentation + +Release v1.12.10 (2017-10-12) +=== + +### Service Client Updates +* `service/codecommit`: Updates service API and documentation + * This release includes the DeleteBranch API and a change to the contents of a Commit object. +* `service/dms`: Updates service API and documentation + * This change includes addition of new optional parameter to an existing API +* `service/elasticbeanstalk`: Updates service API and documentation + * Added the ability to add, delete or update Tags +* `service/polly`: Updates service API + * Amazon Polly exposes two new voices: "Matthew" (US English) and "Takumi" (Japanese) +* `service/rds`: Updates service API and documentation + * You can now call DescribeValidDBInstanceModifications to learn what modifications you can make to your DB instance. You can use this information when you call ModifyDBInstance. + +Release v1.12.9 (2017-10-11) +=== + +### Service Client Updates +* `service/ecr`: Updates service API, documentation, and paginators + * Adds support for new API set used to manage Amazon ECR repository lifecycle policies. Amazon ECR lifecycle policies enable you to specify the lifecycle management of images in a repository. The configuration is a set of one or more rules, where each rule defines an action for Amazon ECR to apply to an image. This allows the automation of cleaning up unused images, for example expiring images based on age or status. A lifecycle policy preview API is provided as well, which allows you to see the impact of a lifecycle policy on an image repository before you execute it +* `service/email`: Updates service API and documentation + * Added content related to email template management and templated email sending operations. +* `aws/endpoints`: Updated Regions and Endpoints metadata. + +Release v1.12.8 (2017-10-10) +=== + +### Service Client Updates +* `service/ec2`: Updates service API and documentation + * This release includes updates to AWS Virtual Private Gateway. +* `service/elasticloadbalancingv2`: Updates service API and documentation +* `service/opsworkscm`: Updates service API and documentation + * Provide engine specific information for node associations. + +Release v1.12.7 (2017-10-06) +=== + +### Service Client Updates +* `service/sqs`: Updates service documentation + * Documentation updates regarding availability of FIFO queues and miscellaneous corrections. 
+ +Release v1.12.6 (2017-10-05) +=== + +### Service Client Updates +* `service/redshift`: Updates service API and documentation + * DescribeEventSubscriptions API supports tag keys and tag values as request parameters. + +Release v1.12.5 (2017-10-04) +=== + +### Service Client Updates +* `service/kinesisanalytics`: Updates service API and documentation + * Kinesis Analytics now supports schema discovery on objects in S3. Additionally, Kinesis Analytics now supports input data preprocessing through Lambda. +* `service/route53domains`: Updates service API and documentation + * Added a new API that checks whether a domain name can be transferred to Amazon Route 53. + +### SDK Bugs +* `service/s3/s3crypto`: Correct PutObjectRequest documentation ([#1568](https://github.com/aws/aws-sdk-go/pull/1568)) + * s3Crypto's PutObjectRequest docstring example was using an incorrect value. Corrected the type used in the example. +Release v1.12.4 (2017-10-03) +=== + +### Service Client Updates +* `service/ec2`: Updates service API, documentation, and waiters + * This release includes service updates to AWS VPN. +* `service/ssm`: Updates service API and documentation + * EC2 Systems Manager support for tagging SSM Documents. Also support for tag-based permissions to restrict access to SSM Documents based on these tags. + +Release v1.12.3 (2017-10-02) +=== + +### Service Client Updates +* `service/cloudhsm`: Updates service documentation and paginators + * Documentation updates for CloudHSM + +Release v1.12.2 (2017-09-29) +=== + +### Service Client Updates +* `service/appstream`: Updates service API and documentation + * Includes APIs for managing and accessing image builders, and deleting images. +* `service/codebuild`: Updates service API and documentation + * Adding support for Building GitHub Pull Requests in AWS CodeBuild +* `service/mturk-requester`: Updates service API and documentation +* `service/organizations`: Updates service API and documentation + * This release flags the HandshakeParty structure's Type and Id fields as 'required'. They effectively were required in the past, as you received an error if you did not include them. This is now reflected at the API definition level. +* `service/route53`: Updates service API and documentation + * This change allows customers to reset elements of health check. + +### SDK Bugs +* `private/protocol/query`: Fix query protocol handling of nested byte slices ([#1557](https://github.com/aws/aws-sdk-go/issues/1557)) + * Fixes the query protocol to correctly marshal nested []byte values of API operations. +* `service/s3`: Fix PutObject and UploadPart API to include ContentMD5 field ([#1559](https://github.com/aws/aws-sdk-go/pull/1559)) + * Fixes the SDK's S3 PutObject and UploadPart API code generation to correctly render the ContentMD5 field into the associated input types for these two API operations. + * Fixes [#1553](https://github.com/aws/aws-sdk-go/pull/1553) +Release v1.12.1 (2017-09-27) +=== + +### Service Client Updates +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/pinpoint`: Updates service API and documentation + * Added two new push notification channels: Amazon Device Messaging (ADM) and, for push notification support in China, Baidu Cloud Push. Added support for APNs auth via .p8 key file. Added operation for direct message deliveries to user IDs, enabling you to message an individual user on multiple endpoints. 
+ Release v1.12.0 (2017-09-26) === diff --git a/vendor/github.com/aws/aws-sdk-go/CONTRIBUTING.md b/vendor/github.com/aws/aws-sdk-go/CONTRIBUTING.md index 7c0186f0c9e..6f422a95edb 100644 --- a/vendor/github.com/aws/aws-sdk-go/CONTRIBUTING.md +++ b/vendor/github.com/aws/aws-sdk-go/CONTRIBUTING.md @@ -67,7 +67,7 @@ Please be aware of the following notes prior to opening a pull request: 5. The JSON files under the SDK's `models` folder are sourced from outside the SDK. Such as `models/apis/ec2/2016-11-15/api.json`. We will not accept pull requests directly on these models. If you discover an issue with the models please - create a Github [issue](issues) describing the issue. + create a [GitHub issue][issues] describing the issue. ### Testing diff --git a/vendor/github.com/aws/aws-sdk-go/Gopkg.lock b/vendor/github.com/aws/aws-sdk-go/Gopkg.lock new file mode 100644 index 00000000000..854c94fdf04 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/Gopkg.lock @@ -0,0 +1,20 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/go-ini/ini" + packages = ["."] + revision = "300e940a926eb277d3901b20bdfcc54928ad3642" + version = "v1.25.4" + +[[projects]] + name = "github.com/jmespath/go-jmespath" + packages = ["."] + revision = "0b12d6b5" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "51a86a867df617990082dec6b868e4efe2fdb2ed0e02a3daa93cd30f962b5085" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/aws/aws-sdk-go/Gopkg.toml b/vendor/github.com/aws/aws-sdk-go/Gopkg.toml new file mode 100644 index 00000000000..664fc59553f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/Gopkg.toml @@ -0,0 +1,48 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + +ignored = [ + # Testing/Example/Codegen dependencies + "github.com/stretchr/testify", + "github.com/stretchr/testify/assert", + "github.com/stretchr/testify/require", + "github.com/go-sql-driver/mysql", + "github.com/gucumber/gucumber", + "github.com/pkg/errors", + "golang.org/x/net", + "golang.org/x/net/html", + "golang.org/x/net/http2", + "golang.org/x/text", + "golang.org/x/text/html", + "golang.org/x/tools", + "golang.org/x/tools/go/loader", +] + + +[[constraint]] + name = "github.com/go-ini/ini" + version = "1.25.4" + +[[constraint]] + name = "github.com/jmespath/go-jmespath" + revision = "0b12d6b5" + #version = "0.2.2" diff --git a/vendor/github.com/aws/aws-sdk-go/README.md b/vendor/github.com/aws/aws-sdk-go/README.md index d5e048a5f18..d8acfdd0024 100644 --- a/vendor/github.com/aws/aws-sdk-go/README.md +++ b/vendor/github.com/aws/aws-sdk-go/README.md @@ -185,7 +185,7 @@ Option's SharedConfigState parameter. 
})) ``` -[credentials_pkg]: ttps://docs.aws.amazon.com/sdk-for-go/api/aws/credentials +[credentials_pkg]: https://docs.aws.amazon.com/sdk-for-go/api/aws/credentials ### Configuring AWS Region diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go index 0e75c5eea1c..007b37be19e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go @@ -5,11 +5,11 @@ import ( "fmt" "io" "io/ioutil" + "reflect" "testing" "time" "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/stretchr/testify/assert" ) func ExampleCopy() { @@ -81,12 +81,24 @@ func TestCopy1(t *testing.T) { awsutil.Copy(&f2, f1) // Values are equal - assert.Equal(t, f2.A, f1.A) - assert.Equal(t, f2.B, f1.B) - assert.Equal(t, f2.C, f1.C) - assert.Equal(t, f2.D, f1.D) - assert.Equal(t, f2.E.B, f1.E.B) - assert.Equal(t, f2.E.D, f1.E.D) + if v1, v2 := f2.A, f1.A; v1 != v2 { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } + if v1, v2 := f2.B, f1.B; !reflect.DeepEqual(v1, v2) { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } + if v1, v2 := f2.C, f1.C; !reflect.DeepEqual(v1, v2) { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } + if v1, v2 := f2.D, f1.D; !v1.Equal(*v2) { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } + if v1, v2 := f2.E.B, f1.E.B; !reflect.DeepEqual(v1, v2) { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } + if v1, v2 := f2.E.D, f1.E.D; v1 != v2 { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } // But pointers are not! str3 := "nothello" @@ -99,14 +111,30 @@ func TestCopy1(t *testing.T) { *f2.E.B = int3 f2.E.c = 5 f2.E.D = 5 - assert.NotEqual(t, f2.A, f1.A) - assert.NotEqual(t, f2.B, f1.B) - assert.NotEqual(t, f2.C, f1.C) - assert.NotEqual(t, f2.D, f1.D) - assert.NotEqual(t, f2.E.a, f1.E.a) - assert.NotEqual(t, f2.E.B, f1.E.B) - assert.NotEqual(t, f2.E.c, f1.E.c) - assert.NotEqual(t, f2.E.D, f1.E.D) + if v1, v2 := f2.A, f1.A; v1 == v2 { + t.Errorf("expected values to be not equivalent, but received %v", v1) + } + if v1, v2 := f2.B, f1.B; reflect.DeepEqual(v1, v2) { + t.Errorf("expected values to be not equivalent, but received %v", v1) + } + if v1, v2 := f2.C, f1.C; reflect.DeepEqual(v1, v2) { + t.Errorf("expected values to be not equivalent, but received %v", v1) + } + if v1, v2 := f2.D, f1.D; v1 == v2 { + t.Errorf("expected values to be not equivalent, but received %v", v1) + } + if v1, v2 := f2.E.a, f1.E.a; v1 == v2 { + t.Errorf("expected values to be not equivalent, but received %v", v1) + } + if v1, v2 := f2.E.B, f1.E.B; v1 == v2 { + t.Errorf("expected values to be not equivalent, but received %v", v1) + } + if v1, v2 := f2.E.c, f1.E.c; v1 == v2 { + t.Errorf("expected values to be not equivalent, but received %v", v1) + } + if v1, v2 := f2.E.D, f1.E.D; v1 == v2 { + t.Errorf("expected values to be not equivalent, but received %v", v1) + } } func TestCopyNestedWithUnexported(t *testing.T) { @@ -125,10 +153,18 @@ func TestCopyNestedWithUnexported(t *testing.T) { awsutil.Copy(&f2, f1) // Values match - assert.Equal(t, f2.A, f1.A) - assert.NotEqual(t, f2.B, f1.B) - assert.NotEqual(t, f2.B.a, f1.B.a) - assert.Equal(t, f2.B.B, f2.B.B) + if v1, v2 := f2.A, f1.A; v1 != v2 { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } + if v1, v2 := f2.B, 
f1.B; v1 == v2 { + t.Errorf("expected values to be not equivalent, but received %v", v1) + } + if v1, v2 := f2.B.a, f1.B.a; v1 == v2 { + t.Errorf("expected values to be not equivalent, but received %v", v1) + } + if v1, v2 := f2.B.B, f2.B.B; v1 != v2 { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } } func TestCopyIgnoreNilMembers(t *testing.T) { @@ -139,34 +175,56 @@ func TestCopyIgnoreNilMembers(t *testing.T) { } f := &Foo{} - assert.Nil(t, f.A) - assert.Nil(t, f.B) - assert.Nil(t, f.C) + if v1 := f.A; v1 != nil { + t.Errorf("expected nil, but received %v", v1) + } + if v1 := f.B; v1 != nil { + t.Errorf("expected nil, but received %v", v1) + } + if v1 := f.C; v1 != nil { + t.Errorf("expected nil, but received %v", v1) + } var f2 Foo awsutil.Copy(&f2, f) - assert.Nil(t, f2.A) - assert.Nil(t, f2.B) - assert.Nil(t, f2.C) + if v1 := f2.A; v1 != nil { + t.Errorf("expected nil, but received %v", v1) + } + if v1 := f2.B; v1 != nil { + t.Errorf("expected nil, but received %v", v1) + } + if v1 := f2.C; v1 != nil { + t.Errorf("expected nil, but received %v", v1) + } fcopy := awsutil.CopyOf(f) f3 := fcopy.(*Foo) - assert.Nil(t, f3.A) - assert.Nil(t, f3.B) - assert.Nil(t, f3.C) + if v1 := f3.A; v1 != nil { + t.Errorf("expected nil, but received %v", v1) + } + if v1 := f3.B; v1 != nil { + t.Errorf("expected nil, but received %v", v1) + } + if v1 := f3.C; v1 != nil { + t.Errorf("expected nil, but received %v", v1) + } } func TestCopyPrimitive(t *testing.T) { str := "hello" var s string awsutil.Copy(&s, &str) - assert.Equal(t, "hello", s) + if v1, v2 := "hello", s; v1 != v2 { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } } func TestCopyNil(t *testing.T) { var s string awsutil.Copy(&s, nil) - assert.Equal(t, "", s) + if v1, v2 := "", s; v1 != v2 { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } } func TestCopyReader(t *testing.T) { @@ -174,13 +232,21 @@ func TestCopyReader(t *testing.T) { var r io.Reader awsutil.Copy(&r, buf) b, err := ioutil.ReadAll(r) - assert.NoError(t, err) - assert.Equal(t, []byte("hello world"), b) + if err != nil { + t.Errorf("expected no error, but received %v", err) + } + if v1, v2 := []byte("hello world"), b; !bytes.Equal(v1, v2) { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } // empty bytes because this is not a deep copy b, err = ioutil.ReadAll(buf) - assert.NoError(t, err) - assert.Equal(t, []byte(""), b) + if err != nil { + t.Errorf("expected no error, but received %v", err) + } + if v1, v2 := []byte(""), b; !bytes.Equal(v1, v2) { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } } func TestCopyDifferentStructs(t *testing.T) { @@ -226,17 +292,39 @@ func TestCopyDifferentStructs(t *testing.T) { awsutil.Copy(&f2, f1) // Values are equal - assert.Equal(t, f2.A, f1.A) - assert.Equal(t, f2.B, f1.B) - assert.Equal(t, f2.C, f1.C) - assert.Equal(t, "unique", f1.SrcUnique) - assert.Equal(t, 1, f1.SameNameDiffType) - assert.Equal(t, 0, f2.DstUnique) - assert.Equal(t, "", f2.SameNameDiffType) - assert.Equal(t, int1, *f1.unexportedPtr) - assert.Nil(t, f2.unexportedPtr) - assert.Equal(t, int2, *f1.ExportedPtr) - assert.Equal(t, int2, *f2.ExportedPtr) + if v1, v2 := f2.A, f1.A; v1 != v2 { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } + if v1, v2 := f2.B, f1.B; !reflect.DeepEqual(v1, v2) { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } + if v1, v2 
:= f2.C, f1.C; !reflect.DeepEqual(v1, v2) { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } + if v1, v2 := "unique", f1.SrcUnique; v1 != v2 { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } + if v1, v2 := 1, f1.SameNameDiffType; v1 != v2 { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } + if v1, v2 := 0, f2.DstUnique; v1 != v2 { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } + if v1, v2 := "", f2.SameNameDiffType; v1 != v2 { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } + if v1, v2 := int1, *f1.unexportedPtr; v1 != v2 { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } + if v1 := f2.unexportedPtr; v1 != nil { + t.Errorf("expected nil, but received %v", v1) + } + if v1, v2 := int2, *f1.ExportedPtr; v1 != v2 { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } + if v1, v2 := int2, *f2.ExportedPtr; v1 != v2 { + t.Errorf("expected values to be equivalent but received %v and %v", v1, v2) + } } func ExampleCopyOf() { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go index 7a5db6e49bc..18d3c5b8ecf 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal_test.go @@ -5,7 +5,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/stretchr/testify/assert" ) func TestDeepEqual(t *testing.T) { @@ -24,6 +23,8 @@ func TestDeepEqual(t *testing.T) { } for i, c := range cases { - assert.Equal(t, c.equal, awsutil.DeepEqual(c.a, c.b), "%d, a:%v b:%v, %t", i, c.a, c.b, c.equal) + if awsutil.DeepEqual(c.a, c.b) != c.equal { + t.Errorf("%d, a:%v b:%v, %t", i, c.a, c.b, c.equal) + } } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go index b2225566fa3..58a05d6cec5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go @@ -1,10 +1,10 @@ package awsutil_test import ( + "strings" "testing" "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/stretchr/testify/assert" ) type Struct struct { @@ -50,8 +50,12 @@ func TestValueAtPathSuccess(t *testing.T) { } for i, c := range testCases { v, err := awsutil.ValuesAtPath(c.data, c.path) - assert.NoError(t, err, "case %d, expected no error, %s", i, c.path) - assert.Equal(t, c.expect, v, "case %d, %s", i, c.path) + if err != nil { + t.Errorf("case %v, expected no error, %v", i, c.path) + } + if e, a := c.expect, v; !awsutil.DeepEqual(e, a) { + t.Errorf("case %v, %v", i, c.path) + } } } @@ -78,12 +82,18 @@ func TestValueAtPathFailure(t *testing.T) { for i, c := range testCases { v, err := awsutil.ValuesAtPath(c.data, c.path) if c.errContains != "" { - assert.Contains(t, err.Error(), c.errContains, "case %d, expected error, %s", i, c.path) + if !strings.Contains(err.Error(), c.errContains) { + t.Errorf("case %v, expected error, %v", i, c.path) + } continue } else { - assert.NoError(t, err, "case %d, expected no error, %s", i, c.path) + if err != nil { + t.Errorf("case %v, expected no error, %v", i, c.path) + } + } + if e, a := c.expect, v; !awsutil.DeepEqual(e, a) { + t.Errorf("case %v, %v", i, c.path) } - assert.Equal(t, c.expect, v, "case %d, %s", i, c.path) } } @@ 
-92,51 +102,81 @@ func TestSetValueAtPathSuccess(t *testing.T) { awsutil.SetValueAtPath(&s, "C", "test1") awsutil.SetValueAtPath(&s, "B.B.C", "test2") awsutil.SetValueAtPath(&s, "B.D.C", "test3") - assert.Equal(t, "test1", s.C) - assert.Equal(t, "test2", s.B.B.C) - assert.Equal(t, "test3", s.B.D.C) + if e, a := "test1", s.C; e != a { + t.Errorf("expected %v, but received %v", e, a) + } + if e, a := "test2", s.B.B.C; e != a { + t.Errorf("expected %v, but received %v", e, a) + } + if e, a := "test3", s.B.D.C; e != a { + t.Errorf("expected %v, but received %v", e, a) + } awsutil.SetValueAtPath(&s, "B.*.C", "test0") - assert.Equal(t, "test0", s.B.B.C) - assert.Equal(t, "test0", s.B.D.C) + if e, a := "test0", s.B.B.C; e != a { + t.Errorf("expected %v, but received %v", e, a) + } + if e, a := "test0", s.B.D.C; e != a { + t.Errorf("expected %v, but received %v", e, a) + } var s2 Struct awsutil.SetValueAtPath(&s2, "b.b.c", "test0") - assert.Equal(t, "test0", s2.B.B.C) + if e, a := "test0", s2.B.B.C; e != a { + t.Errorf("expected %v, but received %v", e, a) + } awsutil.SetValueAtPath(&s2, "A", []Struct{{}}) - assert.Equal(t, []Struct{{}}, s2.A) + if e, a := []Struct{{}}, s2.A; !awsutil.DeepEqual(e, a) { + t.Errorf("expected %v, but received %v", e, a) + } str := "foo" s3 := Struct{} awsutil.SetValueAtPath(&s3, "b.b.c", str) - assert.Equal(t, "foo", s3.B.B.C) + if e, a := "foo", s3.B.B.C; e != a { + t.Errorf("expected %v, but received %v", e, a) + } s3 = Struct{B: &Struct{B: &Struct{C: str}}} awsutil.SetValueAtPath(&s3, "b.b.c", nil) - assert.Equal(t, "", s3.B.B.C) + if e, a := "", s3.B.B.C; e != a { + t.Errorf("expected %v, but received %v", e, a) + } s3 = Struct{} awsutil.SetValueAtPath(&s3, "b.b.c", nil) - assert.Equal(t, "", s3.B.B.C) + if e, a := "", s3.B.B.C; e != a { + t.Errorf("expected %v, but received %v", e, a) + } s3 = Struct{} awsutil.SetValueAtPath(&s3, "b.b.c", &str) - assert.Equal(t, "foo", s3.B.B.C) + if e, a := "foo", s3.B.B.C; e != a { + t.Errorf("expected %v, but received %v", e, a) + } var s4 struct{ Name *string } awsutil.SetValueAtPath(&s4, "Name", str) - assert.Equal(t, str, *s4.Name) + if e, a := str, *s4.Name; e != a { + t.Errorf("expected %v, but received %v", e, a) + } s4 = struct{ Name *string }{} awsutil.SetValueAtPath(&s4, "Name", nil) - assert.Equal(t, (*string)(nil), s4.Name) + if e, a := (*string)(nil), s4.Name; e != a { + t.Errorf("expected %v, but received %v", e, a) + } s4 = struct{ Name *string }{Name: &str} awsutil.SetValueAtPath(&s4, "Name", nil) - assert.Equal(t, (*string)(nil), s4.Name) + if e, a := (*string)(nil), s4.Name; e != a { + t.Errorf("expected %v, but received %v", e, a) + } s4 = struct{ Name *string }{} awsutil.SetValueAtPath(&s4, "Name", &str) - assert.Equal(t, str, *s4.Name) + if e, a := str, *s4.Name; e != a { + t.Errorf("expected %v, but received %v", e, a) + } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types_test.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types_test.go index 0046d2206f6..1a9461e1bab 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types_test.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types_test.go @@ -1,10 +1,9 @@ package aws import ( + "reflect" "testing" "time" - - "github.com/stretchr/testify/assert" ) var testCasesStringSlice = [][]string{ @@ -18,14 +17,22 @@ func TestStringSlice(t *testing.T) { continue } out := StringSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } 
for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + if e, a := in[i], *(out[i]); e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } out2 := StringValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + if e, a := len(out2), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } + if e, a := in, out2; !reflect.DeepEqual(e, a) { + t.Errorf("Unexpected value at idx %d", idx) + } } } @@ -39,22 +46,34 @@ func TestStringValueSlice(t *testing.T) { continue } out := StringValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out { if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + if out[i] != "" { + t.Errorf("Unexpected value at idx %d", idx) + } } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + if e, a := *(in[i]), out[i]; e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } } out2 := StringSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out2), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out2 { if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + if *(out2[i]) != "" { + t.Errorf("Unexpected value at idx %d", idx) + } } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + if e, a := *in[i], *out2[i]; e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } } } @@ -70,14 +89,22 @@ func TestStringMap(t *testing.T) { continue } out := StringMap(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + if e, a := in[i], *(out[i]); e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } out2 := StringValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + if e, a := len(out2), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } + if e, a := in, out2; !reflect.DeepEqual(e, a) { + t.Errorf("Unexpected value at idx %d", idx) + } } } @@ -91,14 +118,22 @@ func TestBoolSlice(t *testing.T) { continue } out := BoolSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + if e, a := in[i], *(out[i]); e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } out2 := BoolValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + if e, a := len(out2), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } + if e, a := in, out2; !reflect.DeepEqual(e, a) { + t.Errorf("Unexpected value at idx %d", idx) + } } } @@ -110,22 +145,34 @@ func TestBoolValueSlice(t *testing.T) { continue } out := BoolValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out { if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at idx %d", 
idx) + if out[i] { + t.Errorf("Unexpected value at idx %d", idx) + } } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + if e, a := *(in[i]), out[i]; e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } } out2 := BoolSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out2), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out2 { if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + if *(out2[i]) { + t.Errorf("Unexpected value at idx %d", idx) + } } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + if e, a := in[i], out2[i]; e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } } } @@ -141,14 +188,22 @@ func TestBoolMap(t *testing.T) { continue } out := BoolMap(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + if e, a := in[i], *(out[i]); e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } out2 := BoolValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + if e, a := len(out2), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } + if e, a := in, out2; !reflect.DeepEqual(e, a) { + t.Errorf("Unexpected value at idx %d", idx) + } } } @@ -162,14 +217,22 @@ func TestIntSlice(t *testing.T) { continue } out := IntSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + if e, a := in[i], *(out[i]); e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } out2 := IntValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + if e, a := len(out2), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } + if e, a := in, out2; !reflect.DeepEqual(e, a) { + t.Errorf("Unexpected value at idx %d", idx) + } } } @@ -181,22 +244,34 @@ func TestIntValueSlice(t *testing.T) { continue } out := IntValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out { if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + if out[i] != 0 { + t.Errorf("Unexpected value at idx %d", idx) + } } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + if e, a := *(in[i]), out[i]; e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } } out2 := IntSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out2), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out2 { if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + if *(out2[i]) != 0 { + t.Errorf("Unexpected value at idx %d", idx) + } } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + if e, a := in[i], out2[i]; e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } } } @@ -212,14 +287,22 @@ func TestIntMap(t *testing.T) { continue } out := IntMap(in) - assert.Len(t, out, len(in), "Unexpected len at 
idx %d", idx) + if e, a := len(out), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + if e, a := in[i], *(out[i]); e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } out2 := IntValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + if e, a := len(out2), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } + if e, a := in, out2; !reflect.DeepEqual(e, a) { + t.Errorf("Unexpected value at idx %d", idx) + } } } @@ -233,14 +316,22 @@ func TestInt64Slice(t *testing.T) { continue } out := Int64Slice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + if e, a := in[i], *(out[i]); e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } out2 := Int64ValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + if e, a := len(out2), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } + if e, a := in, out2; !reflect.DeepEqual(e, a) { + t.Errorf("Unexpected value at idx %d", idx) + } } } @@ -252,22 +343,34 @@ func TestInt64ValueSlice(t *testing.T) { continue } out := Int64ValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out { if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + if out[i] != 0 { + t.Errorf("Unexpected value at idx %d", idx) + } } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + if e, a := *(in[i]), out[i]; e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } } out2 := Int64Slice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out2), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out2 { if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + if *(out2[i]) != 0 { + t.Errorf("Unexpected value at idx %d", idx) + } } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + if e, a := in[i], out2[i]; e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } } } @@ -283,14 +386,22 @@ func TestInt64Map(t *testing.T) { continue } out := Int64Map(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + if e, a := in[i], *(out[i]); e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } out2 := Int64ValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + if e, a := len(out2), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } + if e, a := in, out2; !reflect.DeepEqual(e, a) { + t.Errorf("Unexpected value at idx %d", idx) + } } } @@ -304,14 +415,22 @@ func TestFloat64Slice(t *testing.T) { continue } out := Float64Slice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } 
for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + if e, a := in[i], *(out[i]); e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } out2 := Float64ValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + if e, a := len(out2), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } + if e, a := in, out2; !reflect.DeepEqual(e, a) { + t.Errorf("Unexpected value at idx %d", idx) + } } } @@ -323,22 +442,34 @@ func TestFloat64ValueSlice(t *testing.T) { continue } out := Float64ValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out { if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + if out[i] != 0 { + t.Errorf("Unexpected value at idx %d", idx) + } } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + if e, a := *(in[i]), out[i]; e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } } out2 := Float64Slice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out2), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out2 { if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + if *(out2[i]) != 0 { + t.Errorf("Unexpected value at idx %d", idx) + } } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + if e, a := in[i], out2[i]; e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } } } @@ -354,14 +485,22 @@ func TestFloat64Map(t *testing.T) { continue } out := Float64Map(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + if e, a := in[i], *(out[i]); e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } out2 := Float64ValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + if e, a := len(out2), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } + if e, a := in, out2; !reflect.DeepEqual(e, a) { + t.Errorf("Unexpected value at idx %d", idx) + } } } @@ -375,14 +514,22 @@ func TestTimeSlice(t *testing.T) { continue } out := TimeSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + if e, a := in[i], *(out[i]); e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } out2 := TimeValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + if e, a := len(out2), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } + if e, a := in, out2; !reflect.DeepEqual(e, a) { + t.Errorf("Unexpected value at idx %d", idx) + } } } @@ -394,22 +541,34 @@ func TestTimeValueSlice(t *testing.T) { continue } out := TimeValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out { if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at 
idx %d", idx) + if !out[i].IsZero() { + t.Errorf("Unexpected value at idx %d", idx) + } } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + if e, a := *(in[i]), out[i]; e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } } out2 := TimeSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out2), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out2 { if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + if !(*(out2[i])).IsZero() { + t.Errorf("Unexpected value at idx %d", idx) + } } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + if e, a := in[i], out2[i]; e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } } } @@ -425,14 +584,22 @@ func TestTimeMap(t *testing.T) { continue } out := TimeMap(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + if e, a := len(out), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + if e, a := in[i], *(out[i]); e != a { + t.Errorf("Unexpected value at idx %d", idx) + } } out2 := TimeValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + if e, a := len(out2), len(in); e != a { + t.Errorf("Unexpected len at idx %d", idx) + } + if e, a := in, out2; !reflect.DeepEqual(e, a) { + t.Errorf("Unexpected value at idx %d", idx) + } } } @@ -458,13 +625,17 @@ var testCasesTimeValue = []TimeValueTestCase{ func TestSecondsTimeValue(t *testing.T) { for idx, testCase := range testCasesTimeValue { out := SecondsTimeValue(&testCase.in) - assert.Equal(t, testCase.outSecs, out, "Unexpected value for time value at %d", idx) + if e, a := testCase.outSecs, out; e != a { + t.Errorf("Unexpected value for time value at %d", idx) + } } } func TestMillisecondsTimeValue(t *testing.T) { for idx, testCase := range testCasesTimeValue { out := MillisecondsTimeValue(&testCase.in) - assert.Equal(t, testCase.outMillis, out, "Unexpected value for time value at %d", idx) + if e, a := testCase.outMillis, out; e != a { + t.Errorf("Unexpected value for time value at %d", idx) + } } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go index fe7d3c9bc53..1d715c9f3e5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers_test.go @@ -7,11 +7,10 @@ import ( "net/http" "net/http/httptest" "os" + "strings" "testing" "time" - "github.com/stretchr/testify/assert" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/corehandlers" @@ -32,7 +31,9 @@ func TestValidateEndpointHandler(t *testing.T) { req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) err := req.Build() - assert.NoError(t, err) + if err != nil { + t.Errorf("expect no error, got %v", err) + } } func TestValidateEndpointHandlerErrorRegion(t *testing.T) { @@ -45,8 +46,12 @@ func TestValidateEndpointHandlerErrorRegion(t *testing.T) { req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) err := req.Build() - assert.Error(t, err) - assert.Equal(t, aws.ErrMissingRegion, err) + if err == nil { + t.Errorf("expect error, got none") + } + if e, a := aws.ErrMissingRegion, err; e != a { + t.Errorf("expect %v 
to be %v", e, a) + } } type mockCredsProvider struct { @@ -82,18 +87,30 @@ func TestAfterRetryRefreshCreds(t *testing.T) { }) svc.Handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) - assert.True(t, svc.Config.Credentials.IsExpired(), "Expect to start out expired") - assert.False(t, credProvider.retrieveCalled) + if !svc.Config.Credentials.IsExpired() { + t.Errorf("Expect to start out expired") + } + if credProvider.retrieveCalled { + t.Errorf("expect not called") + } req := svc.NewRequest(&request.Operation{Name: "Operation"}, nil, nil) req.Send() - assert.True(t, svc.Config.Credentials.IsExpired()) - assert.False(t, credProvider.retrieveCalled) + if !svc.Config.Credentials.IsExpired() { + t.Errorf("Expect to start out expired") + } + if credProvider.retrieveCalled { + t.Errorf("expect not called") + } _, err := svc.Config.Credentials.Get() - assert.NoError(t, err) - assert.True(t, credProvider.retrieveCalled) + if err != nil { + t.Errorf("expect no error, got %v", err) + } + if !credProvider.retrieveCalled { + t.Errorf("expect not called") + } } func TestAfterRetryWithContextCanceled(t *testing.T) { @@ -202,8 +219,12 @@ func TestSendHandlerError(t *testing.T) { r.Send() - assert.Error(t, r.Error) - assert.NotNil(t, r.HTTPResponse) + if r.Error == nil { + t.Errorf("expect error, got none") + } + if r.HTTPResponse == nil { + t.Errorf("expect response, got none") + } } func TestSendWithoutFollowRedirects(t *testing.T) { @@ -273,31 +294,47 @@ func TestValidateReqSigHandler(t *testing.T) { corehandlers.ValidateReqSigHandler.Fn(c.Req) - assert.NoError(t, c.Req.Error, "%d, expect no error", i) - assert.Equal(t, c.Resign, resigned, "%d, expected resigning to match", i) + if c.Req.Error != nil { + t.Errorf("expect no error, got %v", c.Req.Error) + } + if e, a := c.Resign, resigned; e != a { + t.Errorf("%d, expect %v to be %v", i, e, a) + } } } func setupContentLengthTestServer(t *testing.T, hasContentLength bool, contentLength int64) *httptest.Server { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { _, ok := r.Header["Content-Length"] - assert.Equal(t, hasContentLength, ok, "expect content length to be set, %t", hasContentLength) + if e, a := hasContentLength, ok; e != a { + t.Errorf("expect %v to be %v", e, a) + } if hasContentLength { - assert.Equal(t, contentLength, r.ContentLength) + if e, a := contentLength, r.ContentLength; e != a { + t.Errorf("expect %v to be %v", e, a) + } } b, err := ioutil.ReadAll(r.Body) - assert.NoError(t, err) + if err != nil { + t.Errorf("expect no error, got %v", err) + } r.Body.Close() authHeader := r.Header.Get("Authorization") if hasContentLength { - assert.Contains(t, authHeader, "content-length") + if e, a := "content-length", authHeader; !strings.Contains(a, e) { + t.Errorf("expect %v to be in %v", e, a) + } } else { - assert.NotContains(t, authHeader, "content-length") + if e, a := "content-length", authHeader; strings.Contains(a, e) { + t.Errorf("expect %v to not be in %v", e, a) + } } - assert.Equal(t, contentLength, int64(len(b))) + if e, a := contentLength, int64(len(b)); e != a { + t.Errorf("expect %v to be %v", e, a) + } })) return server @@ -316,7 +353,9 @@ func TestBuildContentLength_ZeroBody(t *testing.T) { Key: aws.String("keyname"), }) - assert.NoError(t, err) + if err != nil { + t.Errorf("expect no error, got %v", err) + } } func TestBuildContentLength_NegativeBody(t *testing.T) { @@ -334,7 +373,9 @@ func TestBuildContentLength_NegativeBody(t *testing.T) { 
req.HTTPRequest.Header.Set("Content-Length", "-1") - assert.NoError(t, req.Send()) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } } func TestBuildContentLength_WithBody(t *testing.T) { @@ -351,5 +392,7 @@ func TestBuildContentLength_WithBody(t *testing.T) { Body: bytes.NewReader(make([]byte, 1024)), }) - assert.NoError(t, err) + if err != nil { + t.Errorf("expect no error, got %v", err) + } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go index 66973ca01f7..e1d8a08fd99 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator_test.go @@ -3,8 +3,7 @@ package corehandlers_test import ( "fmt" "testing" - - "github.com/stretchr/testify/assert" + "reflect" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -14,7 +13,6 @@ import ( "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/awstesting/unit" "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/stretchr/testify/require" ) var testSvc = func() *client.Client { @@ -113,7 +111,9 @@ func TestNoErrors(t *testing.T) { req := testSvc.NewRequest(&request.Operation{}, input, nil) corehandlers.ValidateParametersHandler.Fn(req) - require.NoError(t, req.Error) + if req.Error != nil { + t.Fatalf("expect no error, got %v", req.Error) + } } func TestMissingRequiredParameters(t *testing.T) { @@ -121,17 +121,33 @@ func TestMissingRequiredParameters(t *testing.T) { req := testSvc.NewRequest(&request.Operation{}, input, nil) corehandlers.ValidateParametersHandler.Fn(req) - require.Error(t, req.Error) - assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code()) - assert.Equal(t, "3 validation error(s) found.", req.Error.(awserr.Error).Message()) + if req.Error == nil { + t.Fatalf("expect error") + } + if e, a := "InvalidParameter", req.Error.(awserr.Error).Code(); e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "3 validation error(s) found.", req.Error.(awserr.Error).Message(); e != a { + t.Errorf("expect %v, got %v", e, a) + } errs := req.Error.(awserr.BatchedErrors).OrigErrs() - assert.Len(t, errs, 3) - assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredList.", errs[0].Error()) - assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredMap.", errs[1].Error()) - assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredBool.", errs[2].Error()) + if e, a := 3, len(errs); e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "ParamRequiredError: missing required field, StructShape.RequiredList.", errs[0].Error(); e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "ParamRequiredError: missing required field, StructShape.RequiredMap.", errs[1].Error(); e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "ParamRequiredError: missing required field, StructShape.RequiredBool.", errs[2].Error(); e != a { + t.Errorf("expect %v, got %v", e, a) + } - assert.Equal(t, "InvalidParameter: 3 validation error(s) found.\n- missing required field, StructShape.RequiredList.\n- missing required field, StructShape.RequiredMap.\n- missing required field, StructShape.RequiredBool.\n", req.Error.Error()) + if e, a := "InvalidParameter: 3 validation error(s) found.\n- missing required field, StructShape.RequiredList.\n- missing required field, 
StructShape.RequiredMap.\n- missing required field, StructShape.RequiredBool.\n", req.Error.Error(); e != a { + t.Errorf("expect %v, got %v", e, a) + } } func TestNestedMissingRequiredParameters(t *testing.T) { @@ -148,15 +164,29 @@ func TestNestedMissingRequiredParameters(t *testing.T) { req := testSvc.NewRequest(&request.Operation{}, input, nil) corehandlers.ValidateParametersHandler.Fn(req) - require.Error(t, req.Error) - assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code()) - assert.Equal(t, "3 validation error(s) found.", req.Error.(awserr.Error).Message()) + if req.Error == nil { + t.Fatalf("expect error") + } + if e, a := "InvalidParameter", req.Error.(awserr.Error).Code(); e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "3 validation error(s) found.", req.Error.(awserr.Error).Message(); e != a { + t.Errorf("expect %v, got %v", e, a) + } errs := req.Error.(awserr.BatchedErrors).OrigErrs() - assert.Len(t, errs, 3) - assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredList[0].Name.", errs[0].Error()) - assert.Equal(t, "ParamRequiredError: missing required field, StructShape.RequiredMap[key2].Name.", errs[1].Error()) - assert.Equal(t, "ParamRequiredError: missing required field, StructShape.OptionalStruct.Name.", errs[2].Error()) + if e, a := 3, len(errs); e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "ParamRequiredError: missing required field, StructShape.RequiredList[0].Name.", errs[0].Error(); e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "ParamRequiredError: missing required field, StructShape.RequiredMap[key2].Name.", errs[1].Error(); e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "ParamRequiredError: missing required field, StructShape.OptionalStruct.Name.", errs[2].Error(); e != a { + t.Errorf("expect %v, got %v", e, a) + } } type testInput struct { @@ -226,7 +256,9 @@ func TestValidateFieldMinParameter(t *testing.T) { req := testSvc.NewRequest(&request.Operation{}, &c.in, nil) corehandlers.ValidateParametersHandler.Fn(req) - assert.Equal(t, c.err, req.Error, "%d case failed", i) + if e, a := c.err, req.Error; !reflect.DeepEqual(e,a) { + t.Errorf("%d, expect %v, got %v", i, e, a) + } } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go index 35e75782b9b..7dde1fb36b8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api_test.go @@ -11,8 +11,6 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/ec2metadata" @@ -71,8 +69,12 @@ func TestEndpoint(t *testing.T) { } req := c.NewRequest(op, nil, nil) - assert.Equal(t, "http://169.254.169.254/latest", req.ClientInfo.Endpoint) - assert.Equal(t, "http://169.254.169.254/latest/meta-data/testpath", req.HTTPRequest.URL.String()) + if e, a := "http://169.254.169.254/latest", req.ClientInfo.Endpoint; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "http://169.254.169.254/latest/meta-data/testpath", req.HTTPRequest.URL.String(); e != a { + t.Errorf("expect %v, got %v", e, a) + } } func TestGetMetadata(t *testing.T) { @@ -85,8 +87,12 @@ func TestGetMetadata(t *testing.T) { resp, err := c.GetMetadata("some/path") - assert.NoError(t, err) - assert.Equal(t, "success", resp) + if err != nil { + t.Errorf("expect no error, got %v", err) + } 
+ if e, a := "success", resp; e != a { + t.Errorf("expect %v, got %v", e, a) + } } func TestGetUserData(t *testing.T) { @@ -99,8 +105,12 @@ func TestGetUserData(t *testing.T) { resp, err := c.GetUserData() - assert.NoError(t, err) - assert.Equal(t, "success", resp) + if err != nil { + t.Errorf("expect no error, got %v", err) + } + if e, a := "success", resp; e != a { + t.Errorf("expect %v, got %v", e, a) + } } func TestGetUserData_Error(t *testing.T) { @@ -126,12 +136,17 @@ func TestGetUserData_Error(t *testing.T) { c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")}) resp, err := c.GetUserData() - assert.Error(t, err) - assert.Empty(t, resp) + if err == nil { + t.Errorf("expect error") + } + if len(resp) != 0 { + t.Errorf("expect empty, got %v", resp) + } - aerr, ok := err.(awserr.Error) - assert.True(t, ok) - assert.Equal(t, "NotFoundError", aerr.Code()) + aerr := err.(awserr.Error) + if e, a := "NotFoundError", aerr.Code(); e != a { + t.Errorf("expect %v, got %v", e, a) + } } func TestGetRegion(t *testing.T) { @@ -144,8 +159,12 @@ func TestGetRegion(t *testing.T) { region, err := c.Region() - assert.NoError(t, err) - assert.Equal(t, "us-west-2", region) + if err != nil { + t.Errorf("expect no error, got %v", err) + } + if e, a := "us-west-2", region; e != a { + t.Errorf("expect %v, got %v", e, a) + } } func TestMetadataAvailable(t *testing.T) { @@ -156,9 +175,9 @@ func TestMetadataAvailable(t *testing.T) { defer server.Close() c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")}) - available := c.Available() - - assert.True(t, available) + if !c.Available() { + t.Errorf("expect available") + } } func TestMetadataIAMInfo_success(t *testing.T) { @@ -170,10 +189,18 @@ func TestMetadataIAMInfo_success(t *testing.T) { c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")}) iamInfo, err := c.IAMInfo() - assert.NoError(t, err) - assert.Equal(t, "Success", iamInfo.Code) - assert.Equal(t, "arn:aws:iam::123456789012:instance-profile/my-instance-profile", iamInfo.InstanceProfileArn) - assert.Equal(t, "AIPAABCDEFGHIJKLMN123", iamInfo.InstanceProfileID) + if err != nil { + t.Errorf("expect no error, got %v", err) + } + if e, a := "Success", iamInfo.Code; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "arn:aws:iam::123456789012:instance-profile/my-instance-profile", iamInfo.InstanceProfileArn; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "AIPAABCDEFGHIJKLMN123", iamInfo.InstanceProfileID; e != a { + t.Errorf("expect %v, got %v", e, a) + } } func TestMetadataIAMInfo_failure(t *testing.T) { @@ -185,10 +212,18 @@ func TestMetadataIAMInfo_failure(t *testing.T) { c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")}) iamInfo, err := c.IAMInfo() - assert.NotNil(t, err) - assert.Equal(t, "", iamInfo.Code) - assert.Equal(t, "", iamInfo.InstanceProfileArn) - assert.Equal(t, "", iamInfo.InstanceProfileID) + if err == nil { + t.Errorf("expect error") + } + if e, a := "", iamInfo.Code; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "", iamInfo.InstanceProfileArn; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "", iamInfo.InstanceProfileID; e != a { + t.Errorf("expect %v, got %v", e, a) + } } func TestMetadataNotAvailable(t *testing.T) { @@ -204,9 +239,9 @@ func TestMetadataNotAvailable(t *testing.T) { r.Retryable = aws.Bool(true) // network errors are retryable }) - available := 
c.Available() - - assert.False(t, available) + if c.Available() { + t.Errorf("expect not available") + } } func TestMetadataErrorResponse(t *testing.T) { @@ -222,8 +257,12 @@ func TestMetadataErrorResponse(t *testing.T) { }) data, err := c.GetMetadata("uri/path") - assert.Empty(t, data) - assert.Contains(t, err.Error(), "error message text") + if len(data) != 0 { + t.Errorf("expect empty, got %v", data) + } + if e, a := "error message text", err.Error(); !strings.Contains(a, e) { + t.Errorf("expect %v to be in %v", e, a) + } } func TestEC2RoleProviderInstanceIdentity(t *testing.T) { @@ -235,8 +274,16 @@ func TestEC2RoleProviderInstanceIdentity(t *testing.T) { c := ec2metadata.New(unit.Session, &aws.Config{Endpoint: aws.String(server.URL + "/latest")}) doc, err := c.GetInstanceIdentityDocument() - assert.Nil(t, err, "Expect no error, %v", err) - assert.Equal(t, doc.AccountID, "123456789012") - assert.Equal(t, doc.AvailabilityZone, "us-east-1d") - assert.Equal(t, doc.Region, "us-east-1") + if err != nil { + t.Errorf("expect no error, got %v", err) + } + if e, a := doc.AccountID, "123456789012"; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := doc.AvailabilityZone, "us-east-1d"; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := doc.Region, "us-east-1"; e != a { + t.Errorf("expect %v, got %v", e, a) + } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 771c122d467..64c7f42ef95 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -319,6 +319,8 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -436,10 +438,17 @@ var awsPartition = partition{ "cloudhsmv2": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "cloudsearch": service{ @@ -710,6 +719,7 @@ var awsPartition = partition{ "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -777,6 +787,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ -793,6 +804,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ -1008,6 +1020,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, @@ -1999,6 +2012,7 @@ var awscnPartition = partition{ "cn-north-1": endpoint{}, }, }, + "es": service{}, "events": service{ Endpoints: endpoints{ diff --git 
a/vendor/github.com/aws/aws-sdk-go/aws/session/session_test.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session_test.go index d0ab360e265..9612b315003 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session_test.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session_test.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/awstesting" "github.com/aws/aws-sdk-go/service/s3" ) @@ -89,14 +90,31 @@ func TestSessionCopy(t *testing.T) { } func TestSessionClientConfig(t *testing.T) { - s, err := NewSession(&aws.Config{Region: aws.String("orig_region")}) + s, err := NewSession(&aws.Config{ + Credentials: credentials.AnonymousCredentials, + Region: aws.String("orig_region"), + EndpointResolver: endpoints.ResolverFunc( + func(service, region string, opts ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { + if e, a := "mock-service", service; e != a { + t.Errorf("expect %q service, got %q", e, a) + } + if e, a := "other-region", region; e != a { + t.Errorf("expect %q region, got %q", e, a) + } + return endpoints.ResolvedEndpoint{ + URL: "https://" + service + "." + region + ".amazonaws.com", + SigningRegion: region, + }, nil + }, + ), + }) assert.NoError(t, err) - cfg := s.ClientConfig("s3", &aws.Config{Region: aws.String("us-west-2")}) + cfg := s.ClientConfig("mock-service", &aws.Config{Region: aws.String("other-region")}) - assert.Equal(t, "https://s3-us-west-2.amazonaws.com", cfg.Endpoint) - assert.Equal(t, "us-west-2", cfg.SigningRegion) - assert.Equal(t, "us-west-2", *cfg.Config.Region) + assert.Equal(t, "https://mock-service.other-region.amazonaws.com", cfg.Endpoint) + assert.Equal(t, "other-region", cfg.SigningRegion) + assert.Equal(t, "other-region", *cfg.Config.Region) } func TestNewSession_NoCredentials(t *testing.T) { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/functional_1_5_test.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/functional_1_5_test.go index 2d4621c64fa..2e591c27f58 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/functional_1_5_test.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/functional_1_5_test.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/awstesting/unit" - "github.com/stretchr/testify/assert" ) func TestStandaloneSign(t *testing.T) { @@ -22,7 +21,9 @@ func TestStandaloneSign(t *testing.T) { c.SubDomain, c.Region, c.Service) req, err := http.NewRequest("GET", host, nil) - assert.NoError(t, err) + if err != nil { + t.Errorf("expected no error, but received %v", err) + } // URL.EscapedPath() will be used by the signer to get the // escaped form of the request's URI path. 
@@ -30,12 +31,20 @@ func TestStandaloneSign(t *testing.T) {
 		req.URL.RawQuery = c.OrigQuery
 
 		_, err = signer.Sign(req, nil, c.Service, c.Region, time.Unix(0, 0))
-		assert.NoError(t, err)
+		if err != nil {
+			t.Errorf("expected no error, but received %v", err)
+		}
 
 		actual := req.Header.Get("Authorization")
-		assert.Equal(t, c.ExpSig, actual)
-		assert.Equal(t, c.OrigURI, req.URL.Path)
-		assert.Equal(t, c.EscapedURI, req.URL.EscapedPath())
+		if e, a := c.ExpSig, actual; e != a {
+			t.Errorf("expected %v, but received %v", e, a)
+		}
+		if e, a := c.OrigURI, req.URL.Path; e != a {
+			t.Errorf("expected %v, but received %v", e, a)
+		}
+		if e, a := c.EscapedURI, req.URL.EscapedPath(); e != a {
+			t.Errorf("expected %v, but received %v", e, a)
+		}
 	}
 }
 
@@ -48,7 +57,9 @@ func TestStandaloneSign_RawPath(t *testing.T) {
 			c.SubDomain, c.Region, c.Service)
 
 		req, err := http.NewRequest("GET", host, nil)
-		assert.NoError(t, err)
+		if err != nil {
+			t.Errorf("expected no error, but received %v", err)
+		}
 
 		// URL.EscapedPath() will be used by the signer to get the
 		// escaped form of the request's URI path.
@@ -57,11 +68,19 @@ func TestStandaloneSign_RawPath(t *testing.T) {
 		req.URL.RawQuery = c.OrigQuery
 
 		_, err = signer.Sign(req, nil, c.Service, c.Region, time.Unix(0, 0))
-		assert.NoError(t, err)
+		if err != nil {
+			t.Errorf("expected no error, but received %v", err)
+		}
 
 		actual := req.Header.Get("Authorization")
-		assert.Equal(t, c.ExpSig, actual)
-		assert.Equal(t, c.OrigURI, req.URL.Path)
-		assert.Equal(t, c.EscapedURI, req.URL.EscapedPath())
+		if e, a := c.ExpSig, actual; e != a {
+			t.Errorf("expected %v, but received %v", e, a)
+		}
+		if e, a := c.OrigURI, req.URL.Path; e != a {
+			t.Errorf("expected %v, but received %v", e, a)
+		}
+		if e, a := c.EscapedURI, req.URL.EscapedPath(); e != a {
+			t.Errorf("expected %v, but received %v", e, a)
+		}
 	}
 }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules_test.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules_test.go
index 7dfddc87e5a..f4be951acac 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules_test.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules_test.go
@@ -2,8 +2,6 @@ package v4
 
 import (
 	"testing"
-
-	"github.com/stretchr/testify/assert"
 )
 
 func TestRuleCheckWhitelist(t *testing.T) {
@@ -13,8 +11,12 @@ func TestRuleCheckWhitelist(t *testing.T) {
 		},
 	}
 
-	assert.True(t, w.IsValid("Cache-Control"))
-	assert.False(t, w.IsValid("Cache-"))
+	if !w.IsValid("Cache-Control") {
+		t.Error("expected true value")
+	}
+	if w.IsValid("Cache-") {
+		t.Error("expected false value")
+	}
 }
 
 func TestRuleCheckBlacklist(t *testing.T) {
@@ -24,16 +26,26 @@ func TestRuleCheckBlacklist(t *testing.T) {
 		},
 	}
 
-	assert.False(t, b.IsValid("Cache-Control"))
-	assert.True(t, b.IsValid("Cache-"))
+	if b.IsValid("Cache-Control") {
+		t.Error("expected false value")
+	}
+	if !b.IsValid("Cache-") {
+		t.Error("expected true value")
+	}
 }
 
 func TestRuleCheckPattern(t *testing.T) {
 	p := patterns{"X-Amz-Meta-"}
 
-	assert.True(t, p.IsValid("X-Amz-Meta-"))
-	assert.True(t, p.IsValid("X-Amz-Meta-Star"))
-	assert.False(t, p.IsValid("Cache-"))
+	if !p.IsValid("X-Amz-Meta-") {
+		t.Error("expected true value")
+	}
+	if !p.IsValid("X-Amz-Meta-Star") {
+		t.Error("expected true value")
+	}
+	if p.IsValid("Cache-") {
+		t.Error("expected false value")
+	}
 }
 
 func TestRuleComplexWhitelist(t *testing.T) {
@@ -50,8 +62,16 @@ func TestRuleComplexWhitelist(t *testing.T) {
 		inclusiveRules{patterns{"X-Amz-"}, blacklist{w}},
 	}
 
-	assert.True(t, r.IsValid("X-Amz-Blah"))
-	assert.False(t, r.IsValid("X-Amz-Meta-"))
-	assert.False(t, r.IsValid("X-Amz-Meta-Star"))
-	assert.False(t, r.IsValid("Cache-Control"))
+	if !r.IsValid("X-Amz-Blah") {
+		t.Error("expected true value")
+	}
+	if r.IsValid("X-Amz-Meta-") {
+		t.Error("expected false value")
+	}
+	if r.IsValid("X-Amz-Meta-Star") {
+		t.Error("expected false value")
+	}
+	if r.IsValid("Cache-Control") {
+		t.Error("expected false value")
+	}
 }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types_test.go b/vendor/github.com/aws/aws-sdk-go/aws/types_test.go
index a7cd93b83c1..e399ef57376 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/types_test.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types_test.go
@@ -1,32 +1,49 @@
 package aws
 
 import (
+	"bytes"
 	"math/rand"
 	"testing"
-
-	"github.com/stretchr/testify/assert"
 )
 
 func TestWriteAtBuffer(t *testing.T) {
 	b := &WriteAtBuffer{}
 
 	n, err := b.WriteAt([]byte{1}, 0)
-	assert.NoError(t, err)
-	assert.Equal(t, 1, n)
+	if err != nil {
+		t.Errorf("expected no error, but received %v", err)
+	}
+	if e, a := 1, n; e != a {
+		t.Errorf("expected %d, but received %d", e, a)
+	}
 
 	n, err = b.WriteAt([]byte{1, 1, 1}, 5)
-	assert.NoError(t, err)
-	assert.Equal(t, 3, n)
+	if err != nil {
+		t.Errorf("expected no error, but received %v", err)
+	}
+	if e, a := 3, n; e != a {
+		t.Errorf("expected %d, but received %d", e, a)
+	}
 
 	n, err = b.WriteAt([]byte{2}, 1)
-	assert.NoError(t, err)
-	assert.Equal(t, 1, n)
+	if err != nil {
+		t.Errorf("expected no error, but received %v", err)
+	}
+	if e, a := 1, n; e != a {
+		t.Errorf("expected %d, but received %d", e, a)
+	}
 
 	n, err = b.WriteAt([]byte{3}, 2)
-	assert.NoError(t, err)
-	assert.Equal(t, 1, n)
+	if err != nil {
+		t.Errorf("expected no error, but received %v", err)
+	}
+	if e, a := 1, n; e != a {
+		t.Errorf("expected %d, but received %d", e, a)
+	}
 
-	assert.Equal(t, []byte{1, 2, 3, 0, 0, 1, 1, 1}, b.Bytes())
+	if !bytes.Equal([]byte{1, 2, 3, 0, 0, 1, 1, 1}, b.Bytes()) {
+		t.Errorf("expected %v, but received %v", []byte{1, 2, 3, 0, 0, 1, 1, 1}, b.Bytes())
+	}
 }
 
 func BenchmarkWriteAtBuffer(b *testing.B) {
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index 5d6b17f327e..d7562331da7 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.12.0"
+const SDKVersion = "1.12.19"
diff --git a/vendor/github.com/aws/aws-sdk-go/buildspec.yml b/vendor/github.com/aws/aws-sdk-go/buildspec.yml
new file mode 100644
index 00000000000..427208edf06
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/buildspec.yml
@@ -0,0 +1,21 @@
+version: 0.2
+
+phases:
+  build:
+    commands:
+      - echo Build started on `date`
+      - export GOPATH=/go
+      - export SDK_CB_ROOT=`pwd`
+      - export SDK_GO_ROOT=/go/src/github.com/aws/aws-sdk-go
+      - mkdir -p /go/src/github.com/aws
+      - ln -s $SDK_CB_ROOT $SDK_GO_ROOT
+      - cd $SDK_GO_ROOT
+      - make unit
+      - cd $SDK_CB_ROOT
+      - #echo Compiling the Go code...
+ post_build: + commands: + - echo Build completed on `date` +#artifacts: +# files: +# - hello diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go index 17711164604..93fe83fd5c6 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build_test.go @@ -24,7 +24,6 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/ec2query" "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" "github.com/aws/aws-sdk-go/private/util" - "github.com/stretchr/testify/assert" ) var _ bytes.Buffer // always import bytes @@ -1476,10 +1475,14 @@ func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) { // build request ec2query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`, util.Trim(string(body))) @@ -1502,10 +1505,14 @@ func TestInputService2ProtocolTestStructureWithLocationNameAndQueryNameAppliedTo // build request ec2query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&BarLocationName=val2&Foo=val1&Version=2014-01-01&yuckQueryName=val3`, util.Trim(string(body))) @@ -1528,10 +1535,14 @@ func TestInputService3ProtocolTestNestedStructureMembersCase1(t *testing.T) { // build request ec2query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&Struct.Scalar=foo&Version=2014-01-01`, util.Trim(string(body))) @@ -1556,10 +1567,14 @@ func TestInputService4ProtocolTestListTypesCase1(t *testing.T) { // build request ec2query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&ListArg.1=foo&ListArg.2=bar&ListArg.3=baz&Version=2014-01-01`, util.Trim(string(body))) @@ -1584,10 +1599,14 @@ func TestInputService5ProtocolTestListWithLocationNameAppliedToMemberCase1(t *te // build request ec2query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&ListMemberName.1=a&ListMemberName.2=b&ListMemberName.3=c&Version=2014-01-01`, util.Trim(string(body))) @@ -1612,10 +1631,14 @@ func TestInputService6ProtocolTestListWithLocationNameAndQueryNameCase1(t *testi // build request ec2query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert 
body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&ListQueryName.1=a&ListQueryName.2=b&ListQueryName.3=c&Version=2014-01-01`, util.Trim(string(body))) @@ -1636,10 +1659,14 @@ func TestInputService7ProtocolTestBase64EncodedBlobsCase1(t *testing.T) { // build request ec2query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`, util.Trim(string(body))) @@ -1660,10 +1687,14 @@ func TestInputService8ProtocolTestTimestampValuesCase1(t *testing.T) { // build request ec2query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`, util.Trim(string(body))) @@ -1684,10 +1715,14 @@ func TestInputService9ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) { // build request ec2query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&Token=abc123&Version=2014-01-01`, util.Trim(string(body))) @@ -1706,10 +1741,14 @@ func TestInputService9ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) { // build request ec2query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&Token=00000000-0000-4000-8000-000000000000&Version=2014-01-01`, util.Trim(string(body))) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go index e87d00b1443..a63b32985cf 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal_test.go @@ -24,7 +24,6 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/ec2query" "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" "github.com/aws/aws-sdk-go/private/util" - "github.com/stretchr/testify/assert" ) var _ bytes.Buffer // always import bytes @@ -1417,18 +1416,38 @@ func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { // unmarshal response ec2query.UnmarshalMeta(req) ec2query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "a", *out.Char) - assert.Equal(t, 1.3, *out.Double) - assert.Equal(t, false, *out.FalseBool) - assert.Equal(t, 1.2, *out.Float) - assert.Equal(t, int64(200), *out.Long) - assert.Equal(t, int64(123), *out.Num) - assert.Equal(t, "myname", *out.Str) - 
assert.Equal(t, true, *out.TrueBool) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "a", *out.Char; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := 1.3, *out.Double; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := false, *out.FalseBool; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := 1.2, *out.Float; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := int64(200), *out.Long; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := int64(123), *out.Num; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "myname", *out.Str; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := true, *out.TrueBool; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -1444,11 +1463,17 @@ func TestOutputService2ProtocolTestBlobCase1(t *testing.T) { // unmarshal response ec2query.UnmarshalMeta(req) ec2query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "value", string(out.Blob)) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "value", string(out.Blob); e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -1464,12 +1489,20 @@ func TestOutputService3ProtocolTestListsCase1(t *testing.T) { // unmarshal response ec2query.UnmarshalMeta(req) ec2query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "abc", *out.ListMember[0]) - assert.Equal(t, "123", *out.ListMember[1]) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "abc", *out.ListMember[0]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "123", *out.ListMember[1]; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -1485,12 +1518,20 @@ func TestOutputService4ProtocolTestListWithCustomMemberNameCase1(t *testing.T) { // unmarshal response ec2query.UnmarshalMeta(req) ec2query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "abc", *out.ListMember[0]) - assert.Equal(t, "123", *out.ListMember[1]) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "abc", *out.ListMember[0]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "123", *out.ListMember[1]; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -1506,12 +1547,20 @@ func TestOutputService5ProtocolTestFlattenedListCase1(t *testing.T) { // unmarshal response ec2query.UnmarshalMeta(req) ec2query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "abc", *out.ListMember[0]) - assert.Equal(t, "123", *out.ListMember[1]) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "abc", *out.ListMember[0]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "123", *out.ListMember[1]; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -1527,12 +1576,20 @@ func TestOutputService6ProtocolTestNormalMapCase1(t *testing.T) { // unmarshal response ec2query.UnmarshalMeta(req) ec2query.Unmarshal(req) - assert.NoError(t, req.Error) + if 
req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "bam", *out.Map["baz"].Foo) - assert.Equal(t, "bar", *out.Map["qux"].Foo) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "bam", *out.Map["baz"].Foo; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "bar", *out.Map["qux"].Foo; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -1548,12 +1605,20 @@ func TestOutputService7ProtocolTestFlattenedMapCase1(t *testing.T) { // unmarshal response ec2query.UnmarshalMeta(req) ec2query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "bam", *out.Map["baz"]) - assert.Equal(t, "bar", *out.Map["qux"]) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "bam", *out.Map["baz"]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "bar", *out.Map["qux"]; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -1569,12 +1634,20 @@ func TestOutputService8ProtocolTestNamedMapCase1(t *testing.T) { // unmarshal response ec2query.UnmarshalMeta(req) ec2query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "bam", *out.Map["baz"]) - assert.Equal(t, "bar", *out.Map["qux"]) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "bam", *out.Map["baz"]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "bar", *out.Map["qux"]; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -1590,10 +1663,16 @@ func TestOutputService9ProtocolTestEmptyStringCase1(t *testing.T) { // unmarshal response ec2query.UnmarshalMeta(req) ec2query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "", *out.Foo) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "", *out.Foo; e != a { + t.Errorf("expect %v, got %v", e, a) + } } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_test.go index 3cd4036b0e0..e1bd3cd78ec 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_test.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/build_test.go @@ -24,7 +24,6 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" "github.com/aws/aws-sdk-go/private/util" - "github.com/stretchr/testify/assert" ) var _ bytes.Buffer // always import bytes @@ -1654,10 +1653,14 @@ func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) { // build request jsonrpc.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"Name":"myname"}`, util.Trim(string(body))) @@ -1665,8 +1668,12 @@ func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) { awstesting.AssertURL(t, "https://test/", r.URL.String()) // 
assert headers - assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) - assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a { + t.Errorf("expect %v to be %v", e, a) + } + if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a { + t.Errorf("expect %v to be %v", e, a) + } } @@ -1680,10 +1687,14 @@ func TestInputService2ProtocolTestTimestampValuesCase1(t *testing.T) { // build request jsonrpc.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"TimeArg":1422172800}`, util.Trim(string(body))) @@ -1691,8 +1702,12 @@ func TestInputService2ProtocolTestTimestampValuesCase1(t *testing.T) { awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers - assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) - assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a { + t.Errorf("expect %v to be %v", e, a) + } + if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a { + t.Errorf("expect %v to be %v", e, a) + } } @@ -1706,10 +1721,14 @@ func TestInputService3ProtocolTestBase64EncodedBlobsCase1(t *testing.T) { // build request jsonrpc.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"BlobArg":"Zm9v"}`, util.Trim(string(body))) @@ -1717,8 +1736,12 @@ func TestInputService3ProtocolTestBase64EncodedBlobsCase1(t *testing.T) { awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers - assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) - assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a { + t.Errorf("expect %v to be %v", e, a) + } + if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a { + t.Errorf("expect %v to be %v", e, a) + } } @@ -1735,10 +1758,14 @@ func TestInputService3ProtocolTestBase64EncodedBlobsCase2(t *testing.T) { // build request jsonrpc.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"BlobMap":{"key1":"Zm9v","key2":"YmFy"}}`, util.Trim(string(body))) @@ -1746,8 +1773,12 @@ func TestInputService3ProtocolTestBase64EncodedBlobsCase2(t *testing.T) { awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers - assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) - assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a { + t.Errorf("expect %v to be %v", e, a) + } + if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a { + 
t.Errorf("expect %v to be %v", e, a) + } } @@ -1764,10 +1795,14 @@ func TestInputService4ProtocolTestNestedBlobsCase1(t *testing.T) { // build request jsonrpc.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"ListParam":["Zm9v","YmFy"]}`, util.Trim(string(body))) @@ -1775,8 +1810,12 @@ func TestInputService4ProtocolTestNestedBlobsCase1(t *testing.T) { awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers - assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) - assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a { + t.Errorf("expect %v to be %v", e, a) + } + if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a { + t.Errorf("expect %v to be %v", e, a) + } } @@ -1792,10 +1831,14 @@ func TestInputService5ProtocolTestRecursiveShapesCase1(t *testing.T) { // build request jsonrpc.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"RecursiveStruct":{"NoRecurse":"foo"}}`, util.Trim(string(body))) @@ -1803,8 +1846,12 @@ func TestInputService5ProtocolTestRecursiveShapesCase1(t *testing.T) { awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers - assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) - assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a { + t.Errorf("expect %v to be %v", e, a) + } + if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a { + t.Errorf("expect %v to be %v", e, a) + } } @@ -1822,10 +1869,14 @@ func TestInputService5ProtocolTestRecursiveShapesCase2(t *testing.T) { // build request jsonrpc.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}`, util.Trim(string(body))) @@ -1833,8 +1884,12 @@ func TestInputService5ProtocolTestRecursiveShapesCase2(t *testing.T) { awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers - assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) - assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a { + t.Errorf("expect %v to be %v", e, a) + } + if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a { + t.Errorf("expect %v to be %v", e, a) + } } @@ -1856,10 +1911,14 @@ func TestInputService5ProtocolTestRecursiveShapesCase3(t *testing.T) { // build request jsonrpc.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not 
to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}}}`, util.Trim(string(body))) @@ -1867,8 +1926,12 @@ func TestInputService5ProtocolTestRecursiveShapesCase3(t *testing.T) { awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers - assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) - assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a { + t.Errorf("expect %v to be %v", e, a) + } + if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a { + t.Errorf("expect %v to be %v", e, a) + } } @@ -1891,10 +1954,14 @@ func TestInputService5ProtocolTestRecursiveShapesCase4(t *testing.T) { // build request jsonrpc.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"NoRecurse":"bar"}]}}`, util.Trim(string(body))) @@ -1902,8 +1969,12 @@ func TestInputService5ProtocolTestRecursiveShapesCase4(t *testing.T) { awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers - assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) - assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a { + t.Errorf("expect %v to be %v", e, a) + } + if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a { + t.Errorf("expect %v to be %v", e, a) + } } @@ -1928,10 +1999,14 @@ func TestInputService5ProtocolTestRecursiveShapesCase5(t *testing.T) { // build request jsonrpc.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"RecursiveStruct":{"NoRecurse":"bar"}}]}}`, util.Trim(string(body))) @@ -1939,8 +2014,12 @@ func TestInputService5ProtocolTestRecursiveShapesCase5(t *testing.T) { awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers - assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) - assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a { + t.Errorf("expect %v to be %v", e, a) + } + if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a { + t.Errorf("expect %v to be %v", e, a) + } } @@ -1963,10 +2042,14 @@ func TestInputService5ProtocolTestRecursiveShapesCase6(t *testing.T) { // build request jsonrpc.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveMap":{"foo":{"NoRecurse":"foo"},"bar":{"NoRecurse":"bar"}}}}`, util.Trim(string(body))) @@ -1974,8 +2057,12 
@@ func TestInputService5ProtocolTestRecursiveShapesCase6(t *testing.T) { awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers - assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) - assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a { + t.Errorf("expect %v to be %v", e, a) + } + if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a { + t.Errorf("expect %v to be %v", e, a) + } } @@ -1989,10 +2076,14 @@ func TestInputService6ProtocolTestEmptyMapsCase1(t *testing.T) { // build request jsonrpc.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"Map":{}}`, util.Trim(string(body))) @@ -2000,8 +2091,12 @@ func TestInputService6ProtocolTestEmptyMapsCase1(t *testing.T) { awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers - assert.Equal(t, "application/x-amz-json-1.1", r.Header.Get("Content-Type")) - assert.Equal(t, "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target")) + if e, a := "application/x-amz-json-1.1", r.Header.Get("Content-Type"); e != a { + t.Errorf("expect %v to be %v", e, a) + } + if e, a := "com.amazonaws.foo.OperationName", r.Header.Get("X-Amz-Target"); e != a { + t.Errorf("expect %v to be %v", e, a) + } } @@ -2015,10 +2110,14 @@ func TestInputService7ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) { // build request jsonrpc.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"Token":"abc123"}`, util.Trim(string(body))) @@ -2037,10 +2136,14 @@ func TestInputService7ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) { // build request jsonrpc.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"Token":"00000000-0000-4000-8000-000000000000"}`, util.Trim(string(body))) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_test.go index 3190f212cfe..e3eb8e583e5 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_test.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_test.go @@ -24,7 +24,6 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" "github.com/aws/aws-sdk-go/private/util" - "github.com/stretchr/testify/assert" ) var _ bytes.Buffer // always import bytes @@ -1109,18 +1108,38 @@ func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { // unmarshal response jsonrpc.UnmarshalMeta(req) jsonrpc.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "a", *out.Char) - 
assert.Equal(t, 1.3, *out.Double) - assert.Equal(t, false, *out.FalseBool) - assert.Equal(t, 1.2, *out.Float) - assert.Equal(t, int64(200), *out.Long) - assert.Equal(t, int64(123), *out.Num) - assert.Equal(t, "myname", *out.Str) - assert.Equal(t, true, *out.TrueBool) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "a", *out.Char; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := 1.3, *out.Double; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := false, *out.FalseBool; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := 1.2, *out.Float; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := int64(200), *out.Long; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := int64(123), *out.Num; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "myname", *out.Str; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := true, *out.TrueBool; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -1136,12 +1155,20 @@ func TestOutputService2ProtocolTestBlobMembersCase1(t *testing.T) { // unmarshal response jsonrpc.UnmarshalMeta(req) jsonrpc.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "hi!", string(out.BlobMember)) - assert.Equal(t, "there!", string(out.StructMember.Foo)) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "hi!", string(out.BlobMember); e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "there!", string(out.StructMember.Foo); e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -1157,12 +1184,20 @@ func TestOutputService3ProtocolTestTimestampMembersCase1(t *testing.T) { // unmarshal response jsonrpc.UnmarshalMeta(req) jsonrpc.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.StructMember.Foo.String()) - assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.TimeMember.String()) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := time.Unix(1.398796238e+09, 0).UTC().String(), out.StructMember.Foo.String(); e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := time.Unix(1.398796238e+09, 0).UTC().String(), out.TimeMember.String(); e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -1178,12 +1213,20 @@ func TestOutputService4ProtocolTestListsCase1(t *testing.T) { // unmarshal response jsonrpc.UnmarshalMeta(req) jsonrpc.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "a", *out.ListMember[0]) - assert.Equal(t, "b", *out.ListMember[1]) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "a", *out.ListMember[0]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "b", *out.ListMember[1]; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -1199,16 +1242,32 @@ func TestOutputService4ProtocolTestListsCase2(t *testing.T) { // unmarshal response jsonrpc.UnmarshalMeta(req) jsonrpc.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, 
out) // ensure out variable is used - assert.Equal(t, "a", *out.ListMember[0]) - assert.Nil(t, out.ListMember[1]) - assert.Nil(t, out.ListMemberMap[1]) - assert.Nil(t, out.ListMemberMap[2]) - assert.Nil(t, out.ListMemberStruct[1]) - assert.Nil(t, out.ListMemberStruct[2]) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "a", *out.ListMember[0]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e := out.ListMember[1]; e != nil { + t.Errorf("expect nil, got %v", e) + } + if e := out.ListMemberMap[1]; e != nil { + t.Errorf("expect nil, got %v", e) + } + if e := out.ListMemberMap[2]; e != nil { + t.Errorf("expect nil, got %v", e) + } + if e := out.ListMemberStruct[1]; e != nil { + t.Errorf("expect nil, got %v", e) + } + if e := out.ListMemberStruct[2]; e != nil { + t.Errorf("expect nil, got %v", e) + } } @@ -1224,14 +1283,26 @@ func TestOutputService5ProtocolTestMapsCase1(t *testing.T) { // unmarshal response jsonrpc.UnmarshalMeta(req) jsonrpc.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, int64(1), *out.MapMember["a"][0]) - assert.Equal(t, int64(2), *out.MapMember["a"][1]) - assert.Equal(t, int64(3), *out.MapMember["b"][0]) - assert.Equal(t, int64(4), *out.MapMember["b"][1]) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := int64(1), *out.MapMember["a"][0]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := int64(2), *out.MapMember["a"][1]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := int64(3), *out.MapMember["b"][0]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := int64(4), *out.MapMember["b"][1]; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -1247,9 +1318,13 @@ func TestOutputService6ProtocolTestIgnoresExtraDataCase1(t *testing.T) { // unmarshal response jsonrpc.UnmarshalMeta(req) jsonrpc.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used + if out == nil { + t.Errorf("expect not to be nil") + } } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go index f66dcc591ad..f9990923ada 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build_test.go @@ -24,7 +24,6 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/query" "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" "github.com/aws/aws-sdk-go/private/util" - "github.com/stretchr/testify/assert" ) var _ bytes.Buffer // always import bytes @@ -1976,12 +1975,12 @@ func (c *InputService11ProtocolTest) InputService11TestCaseOperation1WithContext type InputService11TestShapeInputService11TestCaseOperation1Input struct { _ struct{} `type:"structure"` - TimeArg *time.Time `type:"timestamp" timestampFormat:"iso8601"` + BlobArgs [][]byte `type:"list" flattened:"true"` } -// SetTimeArg sets the TimeArg field's value. -func (s *InputService11TestShapeInputService11TestCaseOperation1Input) SetTimeArg(v time.Time) *InputService11TestShapeInputService11TestCaseOperation1Input { - s.TimeArg = &v +// SetBlobArgs sets the BlobArgs field's value. 
+func (s *InputService11TestShapeInputService11TestCaseOperation1Input) SetBlobArgs(v [][]byte) *InputService11TestShapeInputService11TestCaseOperation1Input { + s.BlobArgs = v return s } @@ -2072,14 +2071,14 @@ const opInputService12TestCaseOperation1 = "OperationName" // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *InputService12ProtocolTest) InputService12TestCaseOperation1Request(input *InputService12TestShapeInputService12TestCaseOperation6Input) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation1Output) { +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1Request(input *InputService12TestShapeInputService12TestCaseOperation1Input) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService12TestCaseOperation1, HTTPPath: "/", } if input == nil { - input = &InputService12TestShapeInputService12TestCaseOperation6Input{} + input = &InputService12TestShapeInputService12TestCaseOperation1Input{} } output = &InputService12TestShapeInputService12TestCaseOperation1Output{} @@ -2097,7 +2096,7 @@ func (c *InputService12ProtocolTest) InputService12TestCaseOperation1Request(inp // // See the AWS API reference guide for 's // API operation InputService12TestCaseOperation1 for usage and error information. -func (c *InputService12ProtocolTest) InputService12TestCaseOperation1(input *InputService12TestShapeInputService12TestCaseOperation6Input) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) { +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1(input *InputService12TestShapeInputService12TestCaseOperation1Input) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) { req, out := c.InputService12TestCaseOperation1Request(input) return out, req.Send() } @@ -2111,467 +2110,612 @@ func (c *InputService12ProtocolTest) InputService12TestCaseOperation1(input *Inp // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *InputService12ProtocolTest) InputService12TestCaseOperation1WithContext(ctx aws.Context, input *InputService12TestShapeInputService12TestCaseOperation6Input, opts ...request.Option) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) { +func (c *InputService12ProtocolTest) InputService12TestCaseOperation1WithContext(ctx aws.Context, input *InputService12TestShapeInputService12TestCaseOperation1Input, opts ...request.Option) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) { req, out := c.InputService12TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opInputService12TestCaseOperation2 = "OperationName" +type InputService12TestShapeInputService12TestCaseOperation1Input struct { + _ struct{} `type:"structure"` + + TimeArg *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} -// InputService12TestCaseOperation2Request generates a "aws/request.Request" representing the -// client's request for the InputService12TestCaseOperation2 operation. The "output" return +// SetTimeArg sets the TimeArg field's value. 
+func (s *InputService12TestShapeInputService12TestCaseOperation1Input) SetTimeArg(v time.Time) *InputService12TestShapeInputService12TestCaseOperation1Input { + s.TimeArg = &v + return s +} + +type InputService12TestShapeInputService12TestCaseOperation1Output struct { + _ struct{} `type:"structure"` +} + +// InputService13ProtocolTest provides the API operation methods for making requests to +// . See this package's package overview docs +// for details on the service. +// +// InputService13ProtocolTest methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type InputService13ProtocolTest struct { + *client.Client +} + +// New creates a new instance of the InputService13ProtocolTest client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a InputService13ProtocolTest client from just a session. +// svc := inputservice13protocoltest.New(mySession) +// +// // Create a InputService13ProtocolTest client with additional configuration +// svc := inputservice13protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService13ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService13ProtocolTest { + c := p.ClientConfig("inputservice13protocoltest", cfgs...) + return newInputService13ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newInputService13ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *InputService13ProtocolTest { + svc := &InputService13ProtocolTest{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "inputservice13protocoltest", + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2014-01-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + return svc +} + +// newRequest creates a new request for a InputService13ProtocolTest operation and runs any +// custom request initialization. +func (c *InputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + return req +} + +const opInputService13TestCaseOperation1 = "OperationName" + +// InputService13TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService13TestCaseOperation1 operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See InputService12TestCaseOperation2 for more information on using the InputService12TestCaseOperation2 +// See InputService13TestCaseOperation1 for more information on using the InputService13TestCaseOperation1 // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the InputService12TestCaseOperation2Request method. -// req, resp := client.InputService12TestCaseOperation2Request(params) +// // Example sending a request using the InputService13TestCaseOperation1Request method. +// req, resp := client.InputService13TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *InputService12ProtocolTest) InputService12TestCaseOperation2Request(input *InputService12TestShapeInputService12TestCaseOperation6Input) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation2Output) { +func (c *InputService13ProtocolTest) InputService13TestCaseOperation1Request(input *InputService13TestShapeInputService13TestCaseOperation6Input) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation1Output) { op := &request.Operation{ - Name: opInputService12TestCaseOperation2, + Name: opInputService13TestCaseOperation1, HTTPPath: "/", } if input == nil { - input = &InputService12TestShapeInputService12TestCaseOperation6Input{} + input = &InputService13TestShapeInputService13TestCaseOperation6Input{} } - output = &InputService12TestShapeInputService12TestCaseOperation2Output{} + output = &InputService13TestShapeInputService13TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } -// InputService12TestCaseOperation2 API operation for . +// InputService13TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's -// API operation InputService12TestCaseOperation2 for usage and error information. -func (c *InputService12ProtocolTest) InputService12TestCaseOperation2(input *InputService12TestShapeInputService12TestCaseOperation6Input) (*InputService12TestShapeInputService12TestCaseOperation2Output, error) { - req, out := c.InputService12TestCaseOperation2Request(input) +// API operation InputService13TestCaseOperation1 for usage and error information. +func (c *InputService13ProtocolTest) InputService13TestCaseOperation1(input *InputService13TestShapeInputService13TestCaseOperation6Input) (*InputService13TestShapeInputService13TestCaseOperation1Output, error) { + req, out := c.InputService13TestCaseOperation1Request(input) return out, req.Send() } -// InputService12TestCaseOperation2WithContext is the same as InputService12TestCaseOperation2 with the addition of +// InputService13TestCaseOperation1WithContext is the same as InputService13TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // -// See InputService12TestCaseOperation2 for details on how to use this API operation. +// See InputService13TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *InputService12ProtocolTest) InputService12TestCaseOperation2WithContext(ctx aws.Context, input *InputService12TestShapeInputService12TestCaseOperation6Input, opts ...request.Option) (*InputService12TestShapeInputService12TestCaseOperation2Output, error) { - req, out := c.InputService12TestCaseOperation2Request(input) +func (c *InputService13ProtocolTest) InputService13TestCaseOperation1WithContext(ctx aws.Context, input *InputService13TestShapeInputService13TestCaseOperation6Input, opts ...request.Option) (*InputService13TestShapeInputService13TestCaseOperation1Output, error) { + req, out := c.InputService13TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opInputService12TestCaseOperation3 = "OperationName" +const opInputService13TestCaseOperation2 = "OperationName" -// InputService12TestCaseOperation3Request generates a "aws/request.Request" representing the -// client's request for the InputService12TestCaseOperation3 operation. The "output" return +// InputService13TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService13TestCaseOperation2 operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See InputService12TestCaseOperation3 for more information on using the InputService12TestCaseOperation3 +// See InputService13TestCaseOperation2 for more information on using the InputService13TestCaseOperation2 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the InputService12TestCaseOperation3Request method. -// req, resp := client.InputService12TestCaseOperation3Request(params) +// // Example sending a request using the InputService13TestCaseOperation2Request method. +// req, resp := client.InputService13TestCaseOperation2Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *InputService12ProtocolTest) InputService12TestCaseOperation3Request(input *InputService12TestShapeInputService12TestCaseOperation6Input) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation3Output) { +func (c *InputService13ProtocolTest) InputService13TestCaseOperation2Request(input *InputService13TestShapeInputService13TestCaseOperation6Input) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation2Output) { op := &request.Operation{ - Name: opInputService12TestCaseOperation3, + Name: opInputService13TestCaseOperation2, HTTPPath: "/", } if input == nil { - input = &InputService12TestShapeInputService12TestCaseOperation6Input{} + input = &InputService13TestShapeInputService13TestCaseOperation6Input{} } - output = &InputService12TestShapeInputService12TestCaseOperation3Output{} + output = &InputService13TestShapeInputService13TestCaseOperation2Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } -// InputService12TestCaseOperation3 API operation for . +// InputService13TestCaseOperation2 API operation for . 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's -// API operation InputService12TestCaseOperation3 for usage and error information. -func (c *InputService12ProtocolTest) InputService12TestCaseOperation3(input *InputService12TestShapeInputService12TestCaseOperation6Input) (*InputService12TestShapeInputService12TestCaseOperation3Output, error) { - req, out := c.InputService12TestCaseOperation3Request(input) +// API operation InputService13TestCaseOperation2 for usage and error information. +func (c *InputService13ProtocolTest) InputService13TestCaseOperation2(input *InputService13TestShapeInputService13TestCaseOperation6Input) (*InputService13TestShapeInputService13TestCaseOperation2Output, error) { + req, out := c.InputService13TestCaseOperation2Request(input) return out, req.Send() } -// InputService12TestCaseOperation3WithContext is the same as InputService12TestCaseOperation3 with the addition of +// InputService13TestCaseOperation2WithContext is the same as InputService13TestCaseOperation2 with the addition of // the ability to pass a context and additional request options. // -// See InputService12TestCaseOperation3 for details on how to use this API operation. +// See InputService13TestCaseOperation2 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *InputService12ProtocolTest) InputService12TestCaseOperation3WithContext(ctx aws.Context, input *InputService12TestShapeInputService12TestCaseOperation6Input, opts ...request.Option) (*InputService12TestShapeInputService12TestCaseOperation3Output, error) { - req, out := c.InputService12TestCaseOperation3Request(input) +func (c *InputService13ProtocolTest) InputService13TestCaseOperation2WithContext(ctx aws.Context, input *InputService13TestShapeInputService13TestCaseOperation6Input, opts ...request.Option) (*InputService13TestShapeInputService13TestCaseOperation2Output, error) { + req, out := c.InputService13TestCaseOperation2Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opInputService12TestCaseOperation4 = "OperationName" +const opInputService13TestCaseOperation3 = "OperationName" -// InputService12TestCaseOperation4Request generates a "aws/request.Request" representing the -// client's request for the InputService12TestCaseOperation4 operation. The "output" return +// InputService13TestCaseOperation3Request generates a "aws/request.Request" representing the +// client's request for the InputService13TestCaseOperation3 operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See InputService12TestCaseOperation4 for more information on using the InputService12TestCaseOperation4 +// See InputService13TestCaseOperation3 for more information on using the InputService13TestCaseOperation3 // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the InputService12TestCaseOperation4Request method. -// req, resp := client.InputService12TestCaseOperation4Request(params) +// // Example sending a request using the InputService13TestCaseOperation3Request method. +// req, resp := client.InputService13TestCaseOperation3Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *InputService12ProtocolTest) InputService12TestCaseOperation4Request(input *InputService12TestShapeInputService12TestCaseOperation6Input) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation4Output) { +func (c *InputService13ProtocolTest) InputService13TestCaseOperation3Request(input *InputService13TestShapeInputService13TestCaseOperation6Input) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation3Output) { op := &request.Operation{ - Name: opInputService12TestCaseOperation4, + Name: opInputService13TestCaseOperation3, HTTPPath: "/", } if input == nil { - input = &InputService12TestShapeInputService12TestCaseOperation6Input{} + input = &InputService13TestShapeInputService13TestCaseOperation6Input{} } - output = &InputService12TestShapeInputService12TestCaseOperation4Output{} + output = &InputService13TestShapeInputService13TestCaseOperation3Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } -// InputService12TestCaseOperation4 API operation for . +// InputService13TestCaseOperation3 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's -// API operation InputService12TestCaseOperation4 for usage and error information. -func (c *InputService12ProtocolTest) InputService12TestCaseOperation4(input *InputService12TestShapeInputService12TestCaseOperation6Input) (*InputService12TestShapeInputService12TestCaseOperation4Output, error) { - req, out := c.InputService12TestCaseOperation4Request(input) +// API operation InputService13TestCaseOperation3 for usage and error information. +func (c *InputService13ProtocolTest) InputService13TestCaseOperation3(input *InputService13TestShapeInputService13TestCaseOperation6Input) (*InputService13TestShapeInputService13TestCaseOperation3Output, error) { + req, out := c.InputService13TestCaseOperation3Request(input) return out, req.Send() } -// InputService12TestCaseOperation4WithContext is the same as InputService12TestCaseOperation4 with the addition of +// InputService13TestCaseOperation3WithContext is the same as InputService13TestCaseOperation3 with the addition of // the ability to pass a context and additional request options. // -// See InputService12TestCaseOperation4 for details on how to use this API operation. +// See InputService13TestCaseOperation3 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *InputService12ProtocolTest) InputService12TestCaseOperation4WithContext(ctx aws.Context, input *InputService12TestShapeInputService12TestCaseOperation6Input, opts ...request.Option) (*InputService12TestShapeInputService12TestCaseOperation4Output, error) { - req, out := c.InputService12TestCaseOperation4Request(input) +func (c *InputService13ProtocolTest) InputService13TestCaseOperation3WithContext(ctx aws.Context, input *InputService13TestShapeInputService13TestCaseOperation6Input, opts ...request.Option) (*InputService13TestShapeInputService13TestCaseOperation3Output, error) { + req, out := c.InputService13TestCaseOperation3Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opInputService12TestCaseOperation5 = "OperationName" +const opInputService13TestCaseOperation4 = "OperationName" -// InputService12TestCaseOperation5Request generates a "aws/request.Request" representing the -// client's request for the InputService12TestCaseOperation5 operation. The "output" return +// InputService13TestCaseOperation4Request generates a "aws/request.Request" representing the +// client's request for the InputService13TestCaseOperation4 operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See InputService12TestCaseOperation5 for more information on using the InputService12TestCaseOperation5 +// See InputService13TestCaseOperation4 for more information on using the InputService13TestCaseOperation4 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the InputService12TestCaseOperation5Request method. -// req, resp := client.InputService12TestCaseOperation5Request(params) +// // Example sending a request using the InputService13TestCaseOperation4Request method. +// req, resp := client.InputService13TestCaseOperation4Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *InputService12ProtocolTest) InputService12TestCaseOperation5Request(input *InputService12TestShapeInputService12TestCaseOperation6Input) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation5Output) { +func (c *InputService13ProtocolTest) InputService13TestCaseOperation4Request(input *InputService13TestShapeInputService13TestCaseOperation6Input) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation4Output) { op := &request.Operation{ - Name: opInputService12TestCaseOperation5, + Name: opInputService13TestCaseOperation4, HTTPPath: "/", } if input == nil { - input = &InputService12TestShapeInputService12TestCaseOperation6Input{} + input = &InputService13TestShapeInputService13TestCaseOperation6Input{} } - output = &InputService12TestShapeInputService12TestCaseOperation5Output{} + output = &InputService13TestShapeInputService13TestCaseOperation4Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } -// InputService12TestCaseOperation5 API operation for . +// InputService13TestCaseOperation4 API operation for . 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's -// API operation InputService12TestCaseOperation5 for usage and error information. -func (c *InputService12ProtocolTest) InputService12TestCaseOperation5(input *InputService12TestShapeInputService12TestCaseOperation6Input) (*InputService12TestShapeInputService12TestCaseOperation5Output, error) { - req, out := c.InputService12TestCaseOperation5Request(input) +// API operation InputService13TestCaseOperation4 for usage and error information. +func (c *InputService13ProtocolTest) InputService13TestCaseOperation4(input *InputService13TestShapeInputService13TestCaseOperation6Input) (*InputService13TestShapeInputService13TestCaseOperation4Output, error) { + req, out := c.InputService13TestCaseOperation4Request(input) return out, req.Send() } -// InputService12TestCaseOperation5WithContext is the same as InputService12TestCaseOperation5 with the addition of +// InputService13TestCaseOperation4WithContext is the same as InputService13TestCaseOperation4 with the addition of // the ability to pass a context and additional request options. // -// See InputService12TestCaseOperation5 for details on how to use this API operation. +// See InputService13TestCaseOperation4 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *InputService12ProtocolTest) InputService12TestCaseOperation5WithContext(ctx aws.Context, input *InputService12TestShapeInputService12TestCaseOperation6Input, opts ...request.Option) (*InputService12TestShapeInputService12TestCaseOperation5Output, error) { - req, out := c.InputService12TestCaseOperation5Request(input) +func (c *InputService13ProtocolTest) InputService13TestCaseOperation4WithContext(ctx aws.Context, input *InputService13TestShapeInputService13TestCaseOperation6Input, opts ...request.Option) (*InputService13TestShapeInputService13TestCaseOperation4Output, error) { + req, out := c.InputService13TestCaseOperation4Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opInputService12TestCaseOperation6 = "OperationName" +const opInputService13TestCaseOperation5 = "OperationName" -// InputService12TestCaseOperation6Request generates a "aws/request.Request" representing the -// client's request for the InputService12TestCaseOperation6 operation. The "output" return +// InputService13TestCaseOperation5Request generates a "aws/request.Request" representing the +// client's request for the InputService13TestCaseOperation5 operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See InputService12TestCaseOperation6 for more information on using the InputService12TestCaseOperation6 +// See InputService13TestCaseOperation5 for more information on using the InputService13TestCaseOperation5 // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the InputService12TestCaseOperation6Request method. -// req, resp := client.InputService12TestCaseOperation6Request(params) +// // Example sending a request using the InputService13TestCaseOperation5Request method. +// req, resp := client.InputService13TestCaseOperation5Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *InputService12ProtocolTest) InputService12TestCaseOperation6Request(input *InputService12TestShapeInputService12TestCaseOperation6Input) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation6Output) { +func (c *InputService13ProtocolTest) InputService13TestCaseOperation5Request(input *InputService13TestShapeInputService13TestCaseOperation6Input) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation5Output) { op := &request.Operation{ - Name: opInputService12TestCaseOperation6, + Name: opInputService13TestCaseOperation5, HTTPPath: "/", } if input == nil { - input = &InputService12TestShapeInputService12TestCaseOperation6Input{} + input = &InputService13TestShapeInputService13TestCaseOperation6Input{} } - output = &InputService12TestShapeInputService12TestCaseOperation6Output{} + output = &InputService13TestShapeInputService13TestCaseOperation5Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } -// InputService12TestCaseOperation6 API operation for . +// InputService13TestCaseOperation5 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's -// API operation InputService12TestCaseOperation6 for usage and error information. -func (c *InputService12ProtocolTest) InputService12TestCaseOperation6(input *InputService12TestShapeInputService12TestCaseOperation6Input) (*InputService12TestShapeInputService12TestCaseOperation6Output, error) { - req, out := c.InputService12TestCaseOperation6Request(input) +// API operation InputService13TestCaseOperation5 for usage and error information. +func (c *InputService13ProtocolTest) InputService13TestCaseOperation5(input *InputService13TestShapeInputService13TestCaseOperation6Input) (*InputService13TestShapeInputService13TestCaseOperation5Output, error) { + req, out := c.InputService13TestCaseOperation5Request(input) return out, req.Send() } -// InputService12TestCaseOperation6WithContext is the same as InputService12TestCaseOperation6 with the addition of +// InputService13TestCaseOperation5WithContext is the same as InputService13TestCaseOperation5 with the addition of // the ability to pass a context and additional request options. // -// See InputService12TestCaseOperation6 for details on how to use this API operation. +// See InputService13TestCaseOperation5 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *InputService12ProtocolTest) InputService12TestCaseOperation6WithContext(ctx aws.Context, input *InputService12TestShapeInputService12TestCaseOperation6Input, opts ...request.Option) (*InputService12TestShapeInputService12TestCaseOperation6Output, error) { - req, out := c.InputService12TestCaseOperation6Request(input) +func (c *InputService13ProtocolTest) InputService13TestCaseOperation5WithContext(ctx aws.Context, input *InputService13TestShapeInputService13TestCaseOperation6Input, opts ...request.Option) (*InputService13TestShapeInputService13TestCaseOperation5Output, error) { + req, out := c.InputService13TestCaseOperation5Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -type InputService12TestShapeInputService12TestCaseOperation1Output struct { +const opInputService13TestCaseOperation6 = "OperationName" + +// InputService13TestCaseOperation6Request generates a "aws/request.Request" representing the +// client's request for the InputService13TestCaseOperation6 operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See InputService13TestCaseOperation6 for more information on using the InputService13TestCaseOperation6 +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the InputService13TestCaseOperation6Request method. +// req, resp := client.InputService13TestCaseOperation6Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *InputService13ProtocolTest) InputService13TestCaseOperation6Request(input *InputService13TestShapeInputService13TestCaseOperation6Input) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation6Output) { + op := &request.Operation{ + Name: opInputService13TestCaseOperation6, + HTTPPath: "/", + } + + if input == nil { + input = &InputService13TestShapeInputService13TestCaseOperation6Input{} + } + + output = &InputService13TestShapeInputService13TestCaseOperation6Output{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// InputService13TestCaseOperation6 API operation for . +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for 's +// API operation InputService13TestCaseOperation6 for usage and error information. +func (c *InputService13ProtocolTest) InputService13TestCaseOperation6(input *InputService13TestShapeInputService13TestCaseOperation6Input) (*InputService13TestShapeInputService13TestCaseOperation6Output, error) { + req, out := c.InputService13TestCaseOperation6Request(input) + return out, req.Send() +} + +// InputService13TestCaseOperation6WithContext is the same as InputService13TestCaseOperation6 with the addition of +// the ability to pass a context and additional request options. 
+// +// See InputService13TestCaseOperation6 for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *InputService13ProtocolTest) InputService13TestCaseOperation6WithContext(ctx aws.Context, input *InputService13TestShapeInputService13TestCaseOperation6Input, opts ...request.Option) (*InputService13TestShapeInputService13TestCaseOperation6Output, error) { + req, out := c.InputService13TestCaseOperation6Request(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type InputService13TestShapeInputService13TestCaseOperation1Output struct { _ struct{} `type:"structure"` } -type InputService12TestShapeInputService12TestCaseOperation2Output struct { +type InputService13TestShapeInputService13TestCaseOperation2Output struct { _ struct{} `type:"structure"` } -type InputService12TestShapeInputService12TestCaseOperation3Output struct { +type InputService13TestShapeInputService13TestCaseOperation3Output struct { _ struct{} `type:"structure"` } -type InputService12TestShapeInputService12TestCaseOperation4Output struct { +type InputService13TestShapeInputService13TestCaseOperation4Output struct { _ struct{} `type:"structure"` } -type InputService12TestShapeInputService12TestCaseOperation5Output struct { +type InputService13TestShapeInputService13TestCaseOperation5Output struct { _ struct{} `type:"structure"` } -type InputService12TestShapeInputService12TestCaseOperation6Input struct { +type InputService13TestShapeInputService13TestCaseOperation6Input struct { _ struct{} `type:"structure"` - RecursiveStruct *InputService12TestShapeRecursiveStructType `type:"structure"` + RecursiveStruct *InputService13TestShapeRecursiveStructType `type:"structure"` } // SetRecursiveStruct sets the RecursiveStruct field's value. -func (s *InputService12TestShapeInputService12TestCaseOperation6Input) SetRecursiveStruct(v *InputService12TestShapeRecursiveStructType) *InputService12TestShapeInputService12TestCaseOperation6Input { +func (s *InputService13TestShapeInputService13TestCaseOperation6Input) SetRecursiveStruct(v *InputService13TestShapeRecursiveStructType) *InputService13TestShapeInputService13TestCaseOperation6Input { s.RecursiveStruct = v return s } -type InputService12TestShapeInputService12TestCaseOperation6Output struct { +type InputService13TestShapeInputService13TestCaseOperation6Output struct { _ struct{} `type:"structure"` } -type InputService12TestShapeRecursiveStructType struct { +type InputService13TestShapeRecursiveStructType struct { _ struct{} `type:"structure"` NoRecurse *string `type:"string"` - RecursiveList []*InputService12TestShapeRecursiveStructType `type:"list"` + RecursiveList []*InputService13TestShapeRecursiveStructType `type:"list"` - RecursiveMap map[string]*InputService12TestShapeRecursiveStructType `type:"map"` + RecursiveMap map[string]*InputService13TestShapeRecursiveStructType `type:"map"` - RecursiveStruct *InputService12TestShapeRecursiveStructType `type:"structure"` + RecursiveStruct *InputService13TestShapeRecursiveStructType `type:"structure"` } // SetNoRecurse sets the NoRecurse field's value. 
-func (s *InputService12TestShapeRecursiveStructType) SetNoRecurse(v string) *InputService12TestShapeRecursiveStructType { +func (s *InputService13TestShapeRecursiveStructType) SetNoRecurse(v string) *InputService13TestShapeRecursiveStructType { s.NoRecurse = &v return s } // SetRecursiveList sets the RecursiveList field's value. -func (s *InputService12TestShapeRecursiveStructType) SetRecursiveList(v []*InputService12TestShapeRecursiveStructType) *InputService12TestShapeRecursiveStructType { +func (s *InputService13TestShapeRecursiveStructType) SetRecursiveList(v []*InputService13TestShapeRecursiveStructType) *InputService13TestShapeRecursiveStructType { s.RecursiveList = v return s } // SetRecursiveMap sets the RecursiveMap field's value. -func (s *InputService12TestShapeRecursiveStructType) SetRecursiveMap(v map[string]*InputService12TestShapeRecursiveStructType) *InputService12TestShapeRecursiveStructType { +func (s *InputService13TestShapeRecursiveStructType) SetRecursiveMap(v map[string]*InputService13TestShapeRecursiveStructType) *InputService13TestShapeRecursiveStructType { s.RecursiveMap = v return s } // SetRecursiveStruct sets the RecursiveStruct field's value. -func (s *InputService12TestShapeRecursiveStructType) SetRecursiveStruct(v *InputService12TestShapeRecursiveStructType) *InputService12TestShapeRecursiveStructType { +func (s *InputService13TestShapeRecursiveStructType) SetRecursiveStruct(v *InputService13TestShapeRecursiveStructType) *InputService13TestShapeRecursiveStructType { s.RecursiveStruct = v return s } -// InputService13ProtocolTest provides the API operation methods for making requests to +// InputService14ProtocolTest provides the API operation methods for making requests to // . See this package's package overview docs // for details on the service. // -// InputService13ProtocolTest methods are safe to use concurrently. It is not safe to +// InputService14ProtocolTest methods are safe to use concurrently. It is not safe to // modify mutate any of the struct's properties though. -type InputService13ProtocolTest struct { +type InputService14ProtocolTest struct { *client.Client } -// New creates a new instance of the InputService13ProtocolTest client with a session. +// New creates a new instance of the InputService14ProtocolTest client with a session. // If additional configuration is needed for the client instance use the optional // aws.Config parameter to add your extra config. // // Example: -// // Create a InputService13ProtocolTest client from just a session. -// svc := inputservice13protocoltest.New(mySession) +// // Create a InputService14ProtocolTest client from just a session. +// svc := inputservice14protocoltest.New(mySession) // -// // Create a InputService13ProtocolTest client with additional configuration -// svc := inputservice13protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func NewInputService13ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService13ProtocolTest { - c := p.ClientConfig("inputservice13protocoltest", cfgs...) - return newInputService13ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) +// // Create a InputService14ProtocolTest client with additional configuration +// svc := inputservice14protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func NewInputService14ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService14ProtocolTest { + c := p.ClientConfig("inputservice14protocoltest", cfgs...) 
+ return newInputService14ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newInputService13ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *InputService13ProtocolTest { - svc := &InputService13ProtocolTest{ +func newInputService14ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *InputService14ProtocolTest { + svc := &InputService14ProtocolTest{ Client: client.New( cfg, metadata.ClientInfo{ - ServiceName: "inputservice13protocoltest", + ServiceName: "inputservice14protocoltest", SigningName: signingName, SigningRegion: signingRegion, Endpoint: endpoint, @@ -2591,173 +2735,173 @@ func newInputService13ProtocolTestClient(cfg aws.Config, handlers request.Handle return svc } -// newRequest creates a new request for a InputService13ProtocolTest operation and runs any +// newRequest creates a new request for a InputService14ProtocolTest operation and runs any // custom request initialization. -func (c *InputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { +func (c *InputService14ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) return req } -const opInputService13TestCaseOperation1 = "OperationName" +const opInputService14TestCaseOperation1 = "OperationName" -// InputService13TestCaseOperation1Request generates a "aws/request.Request" representing the -// client's request for the InputService13TestCaseOperation1 operation. The "output" return +// InputService14TestCaseOperation1Request generates a "aws/request.Request" representing the +// client's request for the InputService14TestCaseOperation1 operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See InputService13TestCaseOperation1 for more information on using the InputService13TestCaseOperation1 +// See InputService14TestCaseOperation1 for more information on using the InputService14TestCaseOperation1 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the InputService13TestCaseOperation1Request method. -// req, resp := client.InputService13TestCaseOperation1Request(params) +// // Example sending a request using the InputService14TestCaseOperation1Request method. 
+// req, resp := client.InputService14TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *InputService13ProtocolTest) InputService13TestCaseOperation1Request(input *InputService13TestShapeInputService13TestCaseOperation2Input) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation1Output) { +func (c *InputService14ProtocolTest) InputService14TestCaseOperation1Request(input *InputService14TestShapeInputService14TestCaseOperation2Input) (req *request.Request, output *InputService14TestShapeInputService14TestCaseOperation1Output) { op := &request.Operation{ - Name: opInputService13TestCaseOperation1, + Name: opInputService14TestCaseOperation1, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &InputService13TestShapeInputService13TestCaseOperation2Input{} + input = &InputService14TestShapeInputService14TestCaseOperation2Input{} } - output = &InputService13TestShapeInputService13TestCaseOperation1Output{} + output = &InputService14TestShapeInputService14TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } -// InputService13TestCaseOperation1 API operation for . +// InputService14TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's -// API operation InputService13TestCaseOperation1 for usage and error information. -func (c *InputService13ProtocolTest) InputService13TestCaseOperation1(input *InputService13TestShapeInputService13TestCaseOperation2Input) (*InputService13TestShapeInputService13TestCaseOperation1Output, error) { - req, out := c.InputService13TestCaseOperation1Request(input) +// API operation InputService14TestCaseOperation1 for usage and error information. +func (c *InputService14ProtocolTest) InputService14TestCaseOperation1(input *InputService14TestShapeInputService14TestCaseOperation2Input) (*InputService14TestShapeInputService14TestCaseOperation1Output, error) { + req, out := c.InputService14TestCaseOperation1Request(input) return out, req.Send() } -// InputService13TestCaseOperation1WithContext is the same as InputService13TestCaseOperation1 with the addition of +// InputService14TestCaseOperation1WithContext is the same as InputService14TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // -// See InputService13TestCaseOperation1 for details on how to use this API operation. +// See InputService14TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *InputService13ProtocolTest) InputService13TestCaseOperation1WithContext(ctx aws.Context, input *InputService13TestShapeInputService13TestCaseOperation2Input, opts ...request.Option) (*InputService13TestShapeInputService13TestCaseOperation1Output, error) { - req, out := c.InputService13TestCaseOperation1Request(input) +func (c *InputService14ProtocolTest) InputService14TestCaseOperation1WithContext(ctx aws.Context, input *InputService14TestShapeInputService14TestCaseOperation2Input, opts ...request.Option) (*InputService14TestShapeInputService14TestCaseOperation1Output, error) { + req, out := c.InputService14TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opInputService13TestCaseOperation2 = "OperationName" +const opInputService14TestCaseOperation2 = "OperationName" -// InputService13TestCaseOperation2Request generates a "aws/request.Request" representing the -// client's request for the InputService13TestCaseOperation2 operation. The "output" return +// InputService14TestCaseOperation2Request generates a "aws/request.Request" representing the +// client's request for the InputService14TestCaseOperation2 operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See InputService13TestCaseOperation2 for more information on using the InputService13TestCaseOperation2 +// See InputService14TestCaseOperation2 for more information on using the InputService14TestCaseOperation2 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the InputService13TestCaseOperation2Request method. -// req, resp := client.InputService13TestCaseOperation2Request(params) +// // Example sending a request using the InputService14TestCaseOperation2Request method. +// req, resp := client.InputService14TestCaseOperation2Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *InputService13ProtocolTest) InputService13TestCaseOperation2Request(input *InputService13TestShapeInputService13TestCaseOperation2Input) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation2Output) { +func (c *InputService14ProtocolTest) InputService14TestCaseOperation2Request(input *InputService14TestShapeInputService14TestCaseOperation2Input) (req *request.Request, output *InputService14TestShapeInputService14TestCaseOperation2Output) { op := &request.Operation{ - Name: opInputService13TestCaseOperation2, + Name: opInputService14TestCaseOperation2, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &InputService13TestShapeInputService13TestCaseOperation2Input{} + input = &InputService14TestShapeInputService14TestCaseOperation2Input{} } - output = &InputService13TestShapeInputService13TestCaseOperation2Output{} + output = &InputService14TestShapeInputService14TestCaseOperation2Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } -// InputService13TestCaseOperation2 API operation for . 
+// InputService14TestCaseOperation2 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's -// API operation InputService13TestCaseOperation2 for usage and error information. -func (c *InputService13ProtocolTest) InputService13TestCaseOperation2(input *InputService13TestShapeInputService13TestCaseOperation2Input) (*InputService13TestShapeInputService13TestCaseOperation2Output, error) { - req, out := c.InputService13TestCaseOperation2Request(input) +// API operation InputService14TestCaseOperation2 for usage and error information. +func (c *InputService14ProtocolTest) InputService14TestCaseOperation2(input *InputService14TestShapeInputService14TestCaseOperation2Input) (*InputService14TestShapeInputService14TestCaseOperation2Output, error) { + req, out := c.InputService14TestCaseOperation2Request(input) return out, req.Send() } -// InputService13TestCaseOperation2WithContext is the same as InputService13TestCaseOperation2 with the addition of +// InputService14TestCaseOperation2WithContext is the same as InputService14TestCaseOperation2 with the addition of // the ability to pass a context and additional request options. // -// See InputService13TestCaseOperation2 for details on how to use this API operation. +// See InputService14TestCaseOperation2 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *InputService13ProtocolTest) InputService13TestCaseOperation2WithContext(ctx aws.Context, input *InputService13TestShapeInputService13TestCaseOperation2Input, opts ...request.Option) (*InputService13TestShapeInputService13TestCaseOperation2Output, error) { - req, out := c.InputService13TestCaseOperation2Request(input) +func (c *InputService14ProtocolTest) InputService14TestCaseOperation2WithContext(ctx aws.Context, input *InputService14TestShapeInputService14TestCaseOperation2Input, opts ...request.Option) (*InputService14TestShapeInputService14TestCaseOperation2Output, error) { + req, out := c.InputService14TestCaseOperation2Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -type InputService13TestShapeInputService13TestCaseOperation1Output struct { +type InputService14TestShapeInputService14TestCaseOperation1Output struct { _ struct{} `type:"structure"` } -type InputService13TestShapeInputService13TestCaseOperation2Input struct { +type InputService14TestShapeInputService14TestCaseOperation2Input struct { _ struct{} `type:"structure"` Token *string `type:"string" idempotencyToken:"true"` } // SetToken sets the Token field's value. 
-func (s *InputService13TestShapeInputService13TestCaseOperation2Input) SetToken(v string) *InputService13TestShapeInputService13TestCaseOperation2Input { +func (s *InputService14TestShapeInputService14TestCaseOperation2Input) SetToken(v string) *InputService14TestShapeInputService14TestCaseOperation2Input { s.Token = &v return s } -type InputService13TestShapeInputService13TestCaseOperation2Output struct { +type InputService14TestShapeInputService14TestCaseOperation2Output struct { _ struct{} `type:"structure"` } @@ -2776,10 +2920,14 @@ func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) { // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`, util.Trim(string(body))) @@ -2800,10 +2948,14 @@ func TestInputService1ProtocolTestScalarMembersCase2(t *testing.T) { // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&Baz=true&Version=2014-01-01`, util.Trim(string(body))) @@ -2824,10 +2976,14 @@ func TestInputService1ProtocolTestScalarMembersCase3(t *testing.T) { // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&Baz=false&Version=2014-01-01`, util.Trim(string(body))) @@ -2850,10 +3006,14 @@ func TestInputService2ProtocolTestNestedStructureMembersCase1(t *testing.T) { // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&StructArg.ScalarArg=foo&Version=2014-01-01`, util.Trim(string(body))) @@ -2878,10 +3038,14 @@ func TestInputService3ProtocolTestListTypesCase1(t *testing.T) { // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&ListArg.member.1=foo&ListArg.member.2=bar&ListArg.member.3=baz&Version=2014-01-01`, util.Trim(string(body))) @@ -2902,10 +3066,14 @@ func TestInputService3ProtocolTestListTypesCase2(t *testing.T) { // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&ListArg=&Version=2014-01-01`, util.Trim(string(body))) @@ -2931,10 +3099,14 @@ func TestInputService4ProtocolTestFlattenedListCase1(t 
*testing.T) { // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&ListArg.1=a&ListArg.2=b&ListArg.3=c&ScalarArg=foo&Version=2014-01-01`, util.Trim(string(body))) @@ -2957,10 +3129,14 @@ func TestInputService4ProtocolTestFlattenedListCase2(t *testing.T) { // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&Foo.1=a&Version=2014-01-01`, util.Trim(string(body))) @@ -2984,10 +3160,14 @@ func TestInputService5ProtocolTestSerializeFlattenedMapTypeCase1(t *testing.T) { // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&MapArg.1.key=key1&MapArg.1.value=val1&MapArg.2.key=key2&MapArg.2.value=val2&Version=2014-01-01`, util.Trim(string(body))) @@ -3012,10 +3192,14 @@ func TestInputService6ProtocolTestNonFlattenedListWithLocationNameCase1(t *testi // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&ListArg.item.1=a&ListArg.item.2=b&ListArg.item.3=c&Version=2014-01-01`, util.Trim(string(body))) @@ -3041,10 +3225,14 @@ func TestInputService7ProtocolTestFlattenedListWithLocationNameCase1(t *testing. 
// build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&ListArgLocation.1=a&ListArgLocation.2=b&ListArgLocation.3=c&ScalarArg=foo&Version=2014-01-01`, util.Trim(string(body))) @@ -3068,10 +3256,14 @@ func TestInputService8ProtocolTestSerializeMapTypeCase1(t *testing.T) { // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&MapArg.entry.1.key=key1&MapArg.entry.1.value=val1&MapArg.entry.2.key=key2&MapArg.entry.2.value=val2&Version=2014-01-01`, util.Trim(string(body))) @@ -3095,10 +3287,14 @@ func TestInputService9ProtocolTestSerializeMapTypeWithLocationNameCase1(t *testi // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&MapArg.entry.1.TheKey=key1&MapArg.entry.1.TheValue=val1&MapArg.entry.2.TheKey=key2&MapArg.entry.2.TheValue=val2&Version=2014-01-01`, util.Trim(string(body))) @@ -3119,10 +3315,14 @@ func TestInputService10ProtocolTestBase64EncodedBlobsCase1(t *testing.T) { // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`, util.Trim(string(body))) @@ -3133,22 +3333,28 @@ func TestInputService10ProtocolTestBase64EncodedBlobsCase1(t *testing.T) { } -func TestInputService11ProtocolTestTimestampValuesCase1(t *testing.T) { +func TestInputService11ProtocolTestBase64EncodedBlobsNestedCase1(t *testing.T) { svc := NewInputService11ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService11TestShapeInputService11TestCaseOperation1Input{ - TimeArg: aws.Time(time.Unix(1422172800, 0)), + BlobArgs: [][]byte{ + []byte("foo"), + }, } req, _ := svc.InputService11TestCaseOperation1Request(input) r := req.HTTPRequest // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) - awstesting.AssertQuery(t, `Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`, util.Trim(string(body))) + awstesting.AssertQuery(t, `Action=OperationName&BlobArgs.1=Zm9v&Version=2014-01-01`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://test/", r.URL.String()) @@ -3157,22 +3363,54 @@ func TestInputService11ProtocolTestTimestampValuesCase1(t *testing.T) { } -func TestInputService12ProtocolTestRecursiveShapesCase1(t *testing.T) { +func TestInputService12ProtocolTestTimestampValuesCase1(t *testing.T) { svc 
:= NewInputService12ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) - input := &InputService12TestShapeInputService12TestCaseOperation6Input{ - RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + input := &InputService12TestShapeInputService12TestCaseOperation1Input{ + TimeArg: aws.Time(time.Unix(1422172800, 0)), + } + req, _ := svc.InputService12TestCaseOperation1Request(input) + r := req.HTTPRequest + + // build request + query.Build(req) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } + + // assert body + if r.Body == nil { + t.Errorf("expect body not to be nil") + } + body, _ := ioutil.ReadAll(r.Body) + awstesting.AssertQuery(t, `Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`, util.Trim(string(body))) + + // assert URL + awstesting.AssertURL(t, "https://test/", r.URL.String()) + + // assert headers + +} + +func TestInputService13ProtocolTestRecursiveShapesCase1(t *testing.T) { + svc := NewInputService13ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService13TestShapeInputService13TestCaseOperation6Input{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ NoRecurse: aws.String("foo"), }, } - req, _ := svc.InputService12TestCaseOperation1Request(input) + req, _ := svc.InputService13TestCaseOperation1Request(input) r := req.HTTPRequest // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.NoRecurse=foo&Version=2014-01-01`, util.Trim(string(body))) @@ -3183,24 +3421,28 @@ func TestInputService12ProtocolTestRecursiveShapesCase1(t *testing.T) { } -func TestInputService12ProtocolTestRecursiveShapesCase2(t *testing.T) { - svc := NewInputService12ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) - input := &InputService12TestShapeInputService12TestCaseOperation6Input{ - RecursiveStruct: &InputService12TestShapeRecursiveStructType{ - RecursiveStruct: &InputService12TestShapeRecursiveStructType{ +func TestInputService13ProtocolTestRecursiveShapesCase2(t *testing.T) { + svc := NewInputService13ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService13TestShapeInputService13TestCaseOperation6Input{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ NoRecurse: aws.String("foo"), }, }, } - req, _ := svc.InputService12TestCaseOperation2Request(input) + req, _ := svc.InputService13TestCaseOperation2Request(input) r := req.HTTPRequest // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`, util.Trim(string(body))) @@ -3211,28 +3453,32 @@ func TestInputService12ProtocolTestRecursiveShapesCase2(t *testing.T) { } -func TestInputService12ProtocolTestRecursiveShapesCase3(t *testing.T) { - svc := NewInputService12ProtocolTest(unit.Session, &aws.Config{Endpoint: 
aws.String("https://test")}) - input := &InputService12TestShapeInputService12TestCaseOperation6Input{ - RecursiveStruct: &InputService12TestShapeRecursiveStructType{ - RecursiveStruct: &InputService12TestShapeRecursiveStructType{ - RecursiveStruct: &InputService12TestShapeRecursiveStructType{ - RecursiveStruct: &InputService12TestShapeRecursiveStructType{ +func TestInputService13ProtocolTestRecursiveShapesCase3(t *testing.T) { + svc := NewInputService13ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService13TestShapeInputService13TestCaseOperation6Input{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ NoRecurse: aws.String("foo"), }, }, }, }, } - req, _ := svc.InputService12TestCaseOperation3Request(input) + req, _ := svc.InputService13TestCaseOperation3Request(input) r := req.HTTPRequest // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveStruct.RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`, util.Trim(string(body))) @@ -3243,11 +3489,11 @@ func TestInputService12ProtocolTestRecursiveShapesCase3(t *testing.T) { } -func TestInputService12ProtocolTestRecursiveShapesCase4(t *testing.T) { - svc := NewInputService12ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) - input := &InputService12TestShapeInputService12TestCaseOperation6Input{ - RecursiveStruct: &InputService12TestShapeRecursiveStructType{ - RecursiveList: []*InputService12TestShapeRecursiveStructType{ +func TestInputService13ProtocolTestRecursiveShapesCase4(t *testing.T) { + svc := NewInputService13ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService13TestShapeInputService13TestCaseOperation6Input{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveList: []*InputService13TestShapeRecursiveStructType{ { NoRecurse: aws.String("foo"), }, @@ -3257,15 +3503,19 @@ func TestInputService12ProtocolTestRecursiveShapesCase4(t *testing.T) { }, }, } - req, _ := svc.InputService12TestCaseOperation4Request(input) + req, _ := svc.InputService13TestCaseOperation4Request(input) r := req.HTTPRequest // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.NoRecurse=bar&Version=2014-01-01`, util.Trim(string(body))) @@ -3276,31 +3526,35 @@ func TestInputService12ProtocolTestRecursiveShapesCase4(t *testing.T) { } -func TestInputService12ProtocolTestRecursiveShapesCase5(t *testing.T) { - svc := NewInputService12ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) - input := &InputService12TestShapeInputService12TestCaseOperation6Input{ - RecursiveStruct: &InputService12TestShapeRecursiveStructType{ - 
RecursiveList: []*InputService12TestShapeRecursiveStructType{ +func TestInputService13ProtocolTestRecursiveShapesCase5(t *testing.T) { + svc := NewInputService13ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService13TestShapeInputService13TestCaseOperation6Input{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveList: []*InputService13TestShapeRecursiveStructType{ { NoRecurse: aws.String("foo"), }, { - RecursiveStruct: &InputService12TestShapeRecursiveStructType{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ NoRecurse: aws.String("bar"), }, }, }, }, } - req, _ := svc.InputService12TestCaseOperation5Request(input) + req, _ := svc.InputService13TestCaseOperation5Request(input) r := req.HTTPRequest // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.RecursiveStruct.NoRecurse=bar&Version=2014-01-01`, util.Trim(string(body))) @@ -3311,11 +3565,11 @@ func TestInputService12ProtocolTestRecursiveShapesCase5(t *testing.T) { } -func TestInputService12ProtocolTestRecursiveShapesCase6(t *testing.T) { - svc := NewInputService12ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) - input := &InputService12TestShapeInputService12TestCaseOperation6Input{ - RecursiveStruct: &InputService12TestShapeRecursiveStructType{ - RecursiveMap: map[string]*InputService12TestShapeRecursiveStructType{ +func TestInputService13ProtocolTestRecursiveShapesCase6(t *testing.T) { + svc := NewInputService13ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService13TestShapeInputService13TestCaseOperation6Input{ + RecursiveStruct: &InputService13TestShapeRecursiveStructType{ + RecursiveMap: map[string]*InputService13TestShapeRecursiveStructType{ "bar": { NoRecurse: aws.String("bar"), }, @@ -3325,15 +3579,19 @@ func TestInputService12ProtocolTestRecursiveShapesCase6(t *testing.T) { }, }, } - req, _ := svc.InputService12TestCaseOperation6Request(input) + req, _ := svc.InputService13TestCaseOperation6Request(input) r := req.HTTPRequest // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&RecursiveStruct.RecursiveMap.entry.1.key=foo&RecursiveStruct.RecursiveMap.entry.1.value.NoRecurse=foo&RecursiveStruct.RecursiveMap.entry.2.key=bar&RecursiveStruct.RecursiveMap.entry.2.value.NoRecurse=bar&Version=2014-01-01`, util.Trim(string(body))) @@ -3344,20 +3602,24 @@ func TestInputService12ProtocolTestRecursiveShapesCase6(t *testing.T) { } -func TestInputService13ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) { - svc := NewInputService13ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) - input := &InputService13TestShapeInputService13TestCaseOperation2Input{ +func TestInputService14ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) { + svc := NewInputService14ProtocolTest(unit.Session, 
&aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService14TestShapeInputService14TestCaseOperation2Input{ Token: aws.String("abc123"), } - req, _ := svc.InputService13TestCaseOperation1Request(input) + req, _ := svc.InputService14TestCaseOperation1Request(input) r := req.HTTPRequest // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&Token=abc123&Version=2014-01-01`, util.Trim(string(body))) @@ -3368,18 +3630,22 @@ func TestInputService13ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) { } -func TestInputService13ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) { - svc := NewInputService13ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) - input := &InputService13TestShapeInputService13TestCaseOperation2Input{} - req, _ := svc.InputService13TestCaseOperation2Request(input) +func TestInputService14ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) { + svc := NewInputService14ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) + input := &InputService14TestShapeInputService14TestCaseOperation2Input{} + req, _ := svc.InputService14TestCaseOperation2Request(input) r := req.HTTPRequest // build request query.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertQuery(t, `Action=OperationName&Token=00000000-0000-4000-8000-000000000000&Version=2014-01-01`, util.Trim(string(body))) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index 524ca952adf..5ce9cba3291 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -121,6 +121,10 @@ func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string return nil } + if _, ok := value.Interface().([]byte); ok { + return q.parseScalar(v, value, prefix, tag) + } + // check for unflattened list member if !q.isEC2 && tag.Get("flattened") == "" { if listName := tag.Get("locationNameList"); listName == "" { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go index 4f6ff3ee6fc..c13bc06e5a3 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_test.go @@ -24,7 +24,6 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/query" "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" "github.com/aws/aws-sdk-go/private/util" - "github.com/stretchr/testify/assert" ) var _ bytes.Buffer // always import bytes @@ -2347,19 +2346,41 @@ func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { // unmarshal response query.UnmarshalMeta(req) query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, 
out) // ensure out variable is used - assert.Equal(t, "a", *out.Char) - assert.Equal(t, 1.3, *out.Double) - assert.Equal(t, false, *out.FalseBool) - assert.Equal(t, 1.2, *out.Float) - assert.Equal(t, int64(200), *out.Long) - assert.Equal(t, int64(123), *out.Num) - assert.Equal(t, "myname", *out.Str) - assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) - assert.Equal(t, true, *out.TrueBool) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "a", *out.Char; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := 1.3, *out.Double; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := false, *out.FalseBool; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := 1.2, *out.Float; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := int64(200), *out.Long; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := int64(123), *out.Num; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "myname", *out.Str; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String(); e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := true, *out.TrueBool; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2375,11 +2396,17 @@ func TestOutputService2ProtocolTestNotAllMembersInResponseCase1(t *testing.T) { // unmarshal response query.UnmarshalMeta(req) query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "myname", *out.Str) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "myname", *out.Str; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2395,11 +2422,17 @@ func TestOutputService3ProtocolTestBlobCase1(t *testing.T) { // unmarshal response query.UnmarshalMeta(req) query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "value", string(out.Blob)) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "value", string(out.Blob); e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2415,12 +2448,20 @@ func TestOutputService4ProtocolTestListsCase1(t *testing.T) { // unmarshal response query.UnmarshalMeta(req) query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "abc", *out.ListMember[0]) - assert.Equal(t, "123", *out.ListMember[1]) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "abc", *out.ListMember[0]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "123", *out.ListMember[1]; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2436,12 +2477,20 @@ func TestOutputService5ProtocolTestListWithCustomMemberNameCase1(t *testing.T) { // unmarshal response query.UnmarshalMeta(req) query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "abc", *out.ListMember[0]) - assert.Equal(t, "123", *out.ListMember[1]) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "abc", 
*out.ListMember[0]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "123", *out.ListMember[1]; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2457,12 +2506,20 @@ func TestOutputService6ProtocolTestFlattenedListCase1(t *testing.T) { // unmarshal response query.UnmarshalMeta(req) query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "abc", *out.ListMember[0]) - assert.Equal(t, "123", *out.ListMember[1]) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "abc", *out.ListMember[0]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "123", *out.ListMember[1]; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2478,11 +2535,17 @@ func TestOutputService7ProtocolTestFlattenedSingleElementListCase1(t *testing.T) // unmarshal response query.UnmarshalMeta(req) query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "abc", *out.ListMember[0]) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "abc", *out.ListMember[0]; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2498,16 +2561,32 @@ func TestOutputService8ProtocolTestListOfStructuresCase1(t *testing.T) { // unmarshal response query.UnmarshalMeta(req) query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "firstbar", *out.List[0].Bar) - assert.Equal(t, "firstbaz", *out.List[0].Baz) - assert.Equal(t, "firstfoo", *out.List[0].Foo) - assert.Equal(t, "secondbar", *out.List[1].Bar) - assert.Equal(t, "secondbaz", *out.List[1].Baz) - assert.Equal(t, "secondfoo", *out.List[1].Foo) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "firstbar", *out.List[0].Bar; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "firstbaz", *out.List[0].Baz; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "firstfoo", *out.List[0].Foo; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "secondbar", *out.List[1].Bar; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "secondbaz", *out.List[1].Baz; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "secondfoo", *out.List[1].Foo; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2523,16 +2602,32 @@ func TestOutputService9ProtocolTestFlattenedListOfStructuresCase1(t *testing.T) // unmarshal response query.UnmarshalMeta(req) query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "firstbar", *out.List[0].Bar) - assert.Equal(t, "firstbaz", *out.List[0].Baz) - assert.Equal(t, "firstfoo", *out.List[0].Foo) - assert.Equal(t, "secondbar", *out.List[1].Bar) - assert.Equal(t, "secondbaz", *out.List[1].Baz) - assert.Equal(t, "secondfoo", *out.List[1].Foo) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "firstbar", *out.List[0].Bar; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "firstbaz", *out.List[0].Baz; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, 
a := "firstfoo", *out.List[0].Foo; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "secondbar", *out.List[1].Bar; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "secondbaz", *out.List[1].Baz; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "secondfoo", *out.List[1].Foo; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2548,12 +2643,20 @@ func TestOutputService10ProtocolTestFlattenedListWithLocationNameCase1(t *testin // unmarshal response query.UnmarshalMeta(req) query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "a", *out.List[0]) - assert.Equal(t, "b", *out.List[1]) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "a", *out.List[0]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "b", *out.List[1]; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2569,12 +2672,20 @@ func TestOutputService11ProtocolTestNormalMapCase1(t *testing.T) { // unmarshal response query.UnmarshalMeta(req) query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "bam", *out.Map["baz"].Foo) - assert.Equal(t, "bar", *out.Map["qux"].Foo) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "bam", *out.Map["baz"].Foo; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "bar", *out.Map["qux"].Foo; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2590,12 +2701,20 @@ func TestOutputService12ProtocolTestFlattenedMapCase1(t *testing.T) { // unmarshal response query.UnmarshalMeta(req) query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "bam", *out.Map["baz"]) - assert.Equal(t, "bar", *out.Map["qux"]) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "bam", *out.Map["baz"]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "bar", *out.Map["qux"]; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2611,11 +2730,17 @@ func TestOutputService13ProtocolTestFlattenedMapInShapeDefinitionCase1(t *testin // unmarshal response query.UnmarshalMeta(req) query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "bar", *out.Map["qux"]) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "bar", *out.Map["qux"]; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2631,12 +2756,20 @@ func TestOutputService14ProtocolTestNamedMapCase1(t *testing.T) { // unmarshal response query.UnmarshalMeta(req) query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "bam", *out.Map["baz"]) - assert.Equal(t, "bar", *out.Map["qux"]) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "bam", *out.Map["baz"]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "bar", *out.Map["qux"]; e != a { + t.Errorf("expect %v, 
got %v", e, a) + } } @@ -2652,10 +2785,16 @@ func TestOutputService15ProtocolTestEmptyStringCase1(t *testing.T) { // unmarshal response query.UnmarshalMeta(req) query.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "", *out.Foo) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "", *out.Foo; e != a { + t.Errorf("expect %v, got %v", e, a) + } } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_test.go index 2e682dc65c4..ac4e4d18e6f 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_test.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/build_test.go @@ -24,7 +24,6 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/restjson" "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" "github.com/aws/aws-sdk-go/private/util" - "github.com/stretchr/testify/assert" ) var _ bytes.Buffer // always import bytes @@ -4251,7 +4250,9 @@ func TestInputService1ProtocolTestNoParametersCase1(t *testing.T) { // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert URL awstesting.AssertURL(t, "https://test/2014-01-01/jobs", r.URL.String()) @@ -4270,7 +4271,9 @@ func TestInputService2ProtocolTestURIParameterOnlyWithNoLocationNameCase1(t *tes // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert URL awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo", r.URL.String()) @@ -4289,7 +4292,9 @@ func TestInputService3ProtocolTestURIParameterOnlyWithLocationNameCase1(t *testi // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert URL awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/bar", r.URL.String()) @@ -4311,7 +4316,9 @@ func TestInputService4ProtocolTestQuerystringListOfStringsCase1(t *testing.T) { // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert URL awstesting.AssertURL(t, "https://test/path?item=value1&item=value2", r.URL.String()) @@ -4334,7 +4341,9 @@ func TestInputService5ProtocolTestStringToStringMapsInQuerystringCase1(t *testin // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert URL awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?bar=baz&fizz=buzz", r.URL.String()) @@ -4363,7 +4372,9 @@ func TestInputService6ProtocolTestStringToStringListMapsInQuerystringCase1(t *te // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert URL awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/id?foo=bar&foo=baz&fizz=buzz&fizz=pop", r.URL.String()) @@ -4382,7 +4393,9 @@ func TestInputService7ProtocolTestBooleanInQuerystringCase1(t *testing.T) { // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert URL awstesting.AssertURL(t, 
"https://test/path?bool-query=true", r.URL.String()) @@ -4401,7 +4414,9 @@ func TestInputService7ProtocolTestBooleanInQuerystringCase2(t *testing.T) { // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert URL awstesting.AssertURL(t, "https://test/path?bool-query=false", r.URL.String()) @@ -4422,7 +4437,9 @@ func TestInputService8ProtocolTestURIParameterAndQuerystringParamsCase1(t *testi // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert URL awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", r.URL.String()) @@ -4447,10 +4464,14 @@ func TestInputService9ProtocolTestURIParameterQuerystringParamsAndJSONBodyCase1( // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"Config":{"A":"one","B":"two"}}`, util.Trim(string(body))) @@ -4478,10 +4499,14 @@ func TestInputService10ProtocolTestURIParameterQuerystringParamsHeadersAndJSONBo // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"Config":{"A":"one","B":"two"}}`, util.Trim(string(body))) @@ -4489,7 +4514,9 @@ func TestInputService10ProtocolTestURIParameterQuerystringParamsHeadersAndJSONBo awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", r.URL.String()) // assert headers - assert.Equal(t, "12345", r.Header.Get("x-amz-checksum")) + if e, a := "12345", r.Header.Get("x-amz-checksum"); e != a { + t.Errorf("expect %v to be %v", e, a) + } } @@ -4505,18 +4532,26 @@ func TestInputService11ProtocolTestStreamingPayloadCase1(t *testing.T) { // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) - assert.Equal(t, `contents`, util.Trim(string(body))) + if e, a := "contents", util.Trim(string(body)); e != a { + t.Errorf("expect %v, got %v", e, a) + } // assert URL awstesting.AssertURL(t, "https://test/2014-01-01/vaults/name/archives", r.URL.String()) // assert headers - assert.Equal(t, "foo", r.Header.Get("x-amz-sha256-tree-hash")) + if e, a := "foo", r.Header.Get("x-amz-sha256-tree-hash"); e != a { + t.Errorf("expect %v to be %v", e, a) + } } @@ -4531,10 +4566,14 @@ func TestInputService12ProtocolTestSerializeBlobsInBodyCase1(t *testing.T) { // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"Bar":"QmxvYiBwYXJhbQ=="}`, util.Trim(string(body))) @@ -4555,12 +4594,18 @@ func TestInputService13ProtocolTestBlobPayloadCase1(t *testing.T) { // build request 
 	restjson.Build(req)
-	assert.NoError(t, req.Error)
+	if req.Error != nil {
+		t.Errorf("expect no error, got %v", req.Error)
+	}
 
 	// assert body
-	assert.NotNil(t, r.Body)
+	if r.Body == nil {
+		t.Errorf("expect body not to be nil")
+	}
 	body, _ := ioutil.ReadAll(r.Body)
-	assert.Equal(t, `bar`, util.Trim(string(body)))
+	if e, a := "bar", util.Trim(string(body)); e != a {
+		t.Errorf("expect %v, got %v", e, a)
+	}
 
 	// assert URL
 	awstesting.AssertURL(t, "https://test/", r.URL.String())
@@ -4577,7 +4622,9 @@ func TestInputService13ProtocolTestBlobPayloadCase2(t *testing.T) {
 
 	// build request
 	restjson.Build(req)
-	assert.NoError(t, req.Error)
+	if req.Error != nil {
+		t.Errorf("expect no error, got %v", req.Error)
+	}
 
 	// assert URL
 	awstesting.AssertURL(t, "https://test/", r.URL.String())
@@ -4598,10 +4645,14 @@ func TestInputService14ProtocolTestStructurePayloadCase1(t *testing.T) {
 
 	// build request
 	restjson.Build(req)
-	assert.NoError(t, req.Error)
+	if req.Error != nil {
+		t.Errorf("expect no error, got %v", req.Error)
+	}
 
 	// assert body
-	assert.NotNil(t, r.Body)
+	if r.Body == nil {
+		t.Errorf("expect body not to be nil")
+	}
 	body, _ := ioutil.ReadAll(r.Body)
 	awstesting.AssertJSON(t, `{"baz":"bar"}`, util.Trim(string(body)))
 
@@ -4620,7 +4671,9 @@ func TestInputService14ProtocolTestStructurePayloadCase2(t *testing.T) {
 
 	// build request
 	restjson.Build(req)
-	assert.NoError(t, req.Error)
+	if req.Error != nil {
+		t.Errorf("expect no error, got %v", req.Error)
+	}
 
 	// assert URL
 	awstesting.AssertURL(t, "https://test/", r.URL.String())
@@ -4637,7 +4690,9 @@ func TestInputService15ProtocolTestOmitsNullQueryParamsButSerializesEmptyStrings
 
 	// build request
 	restjson.Build(req)
-	assert.NoError(t, req.Error)
+	if req.Error != nil {
+		t.Errorf("expect no error, got %v", req.Error)
+	}
 
 	// assert URL
 	awstesting.AssertURL(t, "https://test/path", r.URL.String())
@@ -4656,7 +4711,9 @@ func TestInputService15ProtocolTestOmitsNullQueryParamsButSerializesEmptyStrings
 
 	// build request
 	restjson.Build(req)
-	assert.NoError(t, req.Error)
+	if req.Error != nil {
+		t.Errorf("expect no error, got %v", req.Error)
+	}
 
 	// assert URL
 	awstesting.AssertURL(t, "https://test/path?abc=mno&param-name=", r.URL.String())
@@ -4677,10 +4734,14 @@ func TestInputService16ProtocolTestRecursiveShapesCase1(t *testing.T) {
 
 	// build request
 	restjson.Build(req)
-	assert.NoError(t, req.Error)
+	if req.Error != nil {
+		t.Errorf("expect no error, got %v", req.Error)
+	}
 
 	// assert body
-	assert.NotNil(t, r.Body)
+	if r.Body == nil {
+		t.Errorf("expect body not to be nil")
+	}
 	body, _ := ioutil.ReadAll(r.Body)
 	awstesting.AssertJSON(t, `{"RecursiveStruct":{"NoRecurse":"foo"}}`, util.Trim(string(body)))
 
@@ -4705,10 +4766,14 @@ func TestInputService16ProtocolTestRecursiveShapesCase2(t *testing.T) {
 
 	// build request
 	restjson.Build(req)
-	assert.NoError(t, req.Error)
+	if req.Error != nil {
+		t.Errorf("expect no error, got %v", req.Error)
+	}
 
 	// assert body
-	assert.NotNil(t, r.Body)
+	if r.Body == nil {
+		t.Errorf("expect body not to be nil")
+	}
 	body, _ := ioutil.ReadAll(r.Body)
 	awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}`, util.Trim(string(body)))
 
@@ -4737,10 +4802,14 @@ func TestInputService16ProtocolTestRecursiveShapesCase3(t *testing.T) {
 
 	// build request
 	restjson.Build(req)
-	assert.NoError(t, req.Error)
+	if req.Error != nil {
+		t.Errorf("expect no error, got %v", req.Error)
+	}
 
 	// assert body
-	assert.NotNil(t, r.Body)
+	if r.Body == nil {
+		t.Errorf("expect body not to be nil")
+	}
 	body, _ := 
ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"RecursiveStruct":{"NoRecurse":"foo"}}}}}`, util.Trim(string(body))) @@ -4770,10 +4839,14 @@ func TestInputService16ProtocolTestRecursiveShapesCase4(t *testing.T) { // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"NoRecurse":"bar"}]}}`, util.Trim(string(body))) @@ -4805,10 +4878,14 @@ func TestInputService16ProtocolTestRecursiveShapesCase5(t *testing.T) { // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveList":[{"NoRecurse":"foo"},{"RecursiveStruct":{"NoRecurse":"bar"}}]}}`, util.Trim(string(body))) @@ -4838,10 +4915,14 @@ func TestInputService16ProtocolTestRecursiveShapesCase6(t *testing.T) { // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"RecursiveStruct":{"RecursiveMap":{"foo":{"NoRecurse":"foo"},"bar":{"NoRecurse":"bar"}}}}`, util.Trim(string(body))) @@ -4862,10 +4943,14 @@ func TestInputService17ProtocolTestTimestampValuesCase1(t *testing.T) { // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"TimeArg":1422172800}`, util.Trim(string(body))) @@ -4886,13 +4971,17 @@ func TestInputService17ProtocolTestTimestampValuesCase2(t *testing.T) { // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert URL awstesting.AssertURL(t, "https://test/path", r.URL.String()) // assert headers - assert.Equal(t, "Sun, 25 Jan 2015 08:00:00 GMT", r.Header.Get("x-amz-timearg")) + if e, a := "Sun, 25 Jan 2015 08:00:00 GMT", r.Header.Get("x-amz-timearg"); e != a { + t.Errorf("expect %v to be %v", e, a) + } } @@ -4906,10 +4995,14 @@ func TestInputService18ProtocolTestNamedLocationsInJSONBodyCase1(t *testing.T) { // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"timestamp_location":1422172800}`, util.Trim(string(body))) @@ -4930,12 +5023,18 @@ func TestInputService19ProtocolTestStringPayloadCase1(t *testing.T) { // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + 
t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) - assert.Equal(t, `bar`, util.Trim(string(body))) + if e, a := "bar", util.Trim(string(body)); e != a { + t.Errorf("expect %v, got %v", e, a) + } // assert URL awstesting.AssertURL(t, "https://test/", r.URL.String()) @@ -4954,10 +5053,14 @@ func TestInputService20ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) { // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"Token":"abc123"}`, util.Trim(string(body))) @@ -4976,10 +5079,14 @@ func TestInputService20ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) { // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert body - assert.NotNil(t, r.Body) + if r.Body == nil { + t.Errorf("expect body not to be nil") + } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"Token":"00000000-0000-4000-8000-000000000000"}`, util.Trim(string(body))) @@ -4999,13 +5106,17 @@ func TestInputService21ProtocolTestJSONValueTraitCase1(t *testing.T) { // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert URL awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers - assert.Equal(t, "eyJGb28iOiJCYXIifQ==", r.Header.Get("X-Amz-Foo")) + if e, a := "eyJGb28iOiJCYXIifQ==", r.Header.Get("X-Amz-Foo"); e != a { + t.Errorf("expect %v to be %v", e, a) + } } @@ -5017,7 +5128,9 @@ func TestInputService21ProtocolTestJSONValueTraitCase2(t *testing.T) { // build request restjson.Build(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect no error, got %v", req.Error) + } // assert URL awstesting.AssertURL(t, "https://test/", r.URL.String()) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_test.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_test.go index 341f3cdaef2..134a1ae3ad9 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_test.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_test.go @@ -24,7 +24,6 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/restjson" "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" "github.com/aws/aws-sdk-go/private/util" - "github.com/stretchr/testify/assert" ) var _ bytes.Buffer // always import bytes @@ -1941,21 +1940,47 @@ func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { // unmarshal response restjson.UnmarshalMeta(req) restjson.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "a", *out.Char) - assert.Equal(t, 1.3, *out.Double) - assert.Equal(t, false, *out.FalseBool) - assert.Equal(t, 1.2, *out.Float) - assert.Equal(t, "test", *out.ImaHeader) - assert.Equal(t, "abc", *out.ImaHeaderLocation) - assert.Equal(t, int64(200), *out.Long) - assert.Equal(t, int64(123), *out.Num) - assert.Equal(t, int64(200), *out.Status) - assert.Equal(t, "myname", *out.Str) - assert.Equal(t, true, *out.TrueBool) + if out == nil { + t.Errorf("expect not to be nil") + } 
+ if e, a := "a", *out.Char; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := 1.3, *out.Double; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := false, *out.FalseBool; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := 1.2, *out.Float; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "test", *out.ImaHeader; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "abc", *out.ImaHeaderLocation; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := int64(200), *out.Long; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := int64(123), *out.Num; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := int64(200), *out.Status; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "myname", *out.Str; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := true, *out.TrueBool; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -1971,12 +1996,20 @@ func TestOutputService2ProtocolTestBlobMembersCase1(t *testing.T) { // unmarshal response restjson.UnmarshalMeta(req) restjson.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "hi!", string(out.BlobMember)) - assert.Equal(t, "there!", string(out.StructMember.Foo)) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "hi!", string(out.BlobMember); e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "there!", string(out.StructMember.Foo); e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -1992,12 +2025,20 @@ func TestOutputService3ProtocolTestTimestampMembersCase1(t *testing.T) { // unmarshal response restjson.UnmarshalMeta(req) restjson.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.StructMember.Foo.String()) - assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.TimeMember.String()) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := time.Unix(1.398796238e+09, 0).UTC().String(), out.StructMember.Foo.String(); e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := time.Unix(1.398796238e+09, 0).UTC().String(), out.TimeMember.String(); e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2013,12 +2054,20 @@ func TestOutputService4ProtocolTestListsCase1(t *testing.T) { // unmarshal response restjson.UnmarshalMeta(req) restjson.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "a", *out.ListMember[0]) - assert.Equal(t, "b", *out.ListMember[1]) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "a", *out.ListMember[0]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "b", *out.ListMember[1]; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2034,12 +2083,20 @@ func TestOutputService5ProtocolTestListsWithStructureMemberCase1(t *testing.T) { // unmarshal response restjson.UnmarshalMeta(req) restjson.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable 
is used - assert.Equal(t, "a", *out.ListMember[0].Foo) - assert.Equal(t, "b", *out.ListMember[1].Foo) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "a", *out.ListMember[0].Foo; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "b", *out.ListMember[1].Foo; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2055,14 +2112,26 @@ func TestOutputService6ProtocolTestMapsCase1(t *testing.T) { // unmarshal response restjson.UnmarshalMeta(req) restjson.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, int64(1), *out.MapMember["a"][0]) - assert.Equal(t, int64(2), *out.MapMember["a"][1]) - assert.Equal(t, int64(3), *out.MapMember["b"][0]) - assert.Equal(t, int64(4), *out.MapMember["b"][1]) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := int64(1), *out.MapMember["a"][0]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := int64(2), *out.MapMember["a"][1]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := int64(3), *out.MapMember["b"][0]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := int64(4), *out.MapMember["b"][1]; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2078,12 +2147,20 @@ func TestOutputService7ProtocolTestComplexMapValuesCase1(t *testing.T) { // unmarshal response restjson.UnmarshalMeta(req) restjson.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.MapMember["a"].String()) - assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.MapMember["b"].String()) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := time.Unix(1.398796238e+09, 0).UTC().String(), out.MapMember["a"].String(); e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := time.Unix(1.398796238e+09, 0).UTC().String(), out.MapMember["b"].String(); e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2099,10 +2176,14 @@ func TestOutputService8ProtocolTestIgnoresExtraDataCase1(t *testing.T) { // unmarshal response restjson.UnmarshalMeta(req) restjson.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used + if out == nil { + t.Errorf("expect not to be nil") + } } @@ -2121,15 +2202,29 @@ func TestOutputService9ProtocolTestSupportsHeaderMapsCase1(t *testing.T) { // unmarshal response restjson.UnmarshalMeta(req) restjson.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "10", *out.AllHeaders["Content-Length"]) - assert.Equal(t, "boo", *out.AllHeaders["X-Bam"]) - assert.Equal(t, "bar", *out.AllHeaders["X-Foo"]) - assert.Equal(t, "boo", *out.PrefixedHeaders["Bam"]) - assert.Equal(t, "bar", *out.PrefixedHeaders["Foo"]) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "10", *out.AllHeaders["Content-Length"]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "boo", *out.AllHeaders["X-Bam"]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "bar", 
*out.AllHeaders["X-Foo"]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "boo", *out.PrefixedHeaders["Bam"]; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "bar", *out.PrefixedHeaders["Foo"]; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2146,12 +2241,20 @@ func TestOutputService10ProtocolTestJSONPayloadCase1(t *testing.T) { // unmarshal response restjson.UnmarshalMeta(req) restjson.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "abc", *out.Data.Foo) - assert.Equal(t, "baz", *out.Header) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "abc", *out.Data.Foo; e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "baz", *out.Header; e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2167,11 +2270,17 @@ func TestOutputService11ProtocolTestStreamingPayloadCase1(t *testing.T) { // unmarshal response restjson.UnmarshalMeta(req) restjson.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "abc", string(out.Stream)) + if out == nil { + t.Errorf("expect not to be nil") + } + if e, a := "abc", string(out.Stream); e != a { + t.Errorf("expect %v, got %v", e, a) + } } @@ -2188,9 +2297,13 @@ func TestOutputService12ProtocolTestJSONValueTraitCase1(t *testing.T) { // unmarshal response restjson.UnmarshalMeta(req) restjson.Unmarshal(req) - assert.NoError(t, req.Error) + if req.Error != nil { + t.Errorf("expect not error, got %v", req.Error) + } // assert response - assert.NotNil(t, out) // ensure out variable is used + if out == nil { + t.Errorf("expect not to be nil") + } reflect.DeepEqual(out.Attr, map[string]interface{}{"Foo": "Bar"}) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 28c0a1e2e37..ed9b326c0c2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -1377,8 +1377,7 @@ func (c *EC2) AttachVpnGatewayRequest(input *AttachVpnGatewayInput) (req *reques // Attaches a virtual private gateway to a VPC. You can attach one virtual private // gateway to one VPC at a time. // -// For more information, see Adding a Hardware Virtual Private Gateway to Your -// VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// For more information, see AWS Managed VPN Connections (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2275,8 +2274,7 @@ func (c *EC2) ConfirmProductInstanceRequest(input *ConfirmProductInstanceInput) // // Determines whether a product code is associated with an instance. This action // can only be used by the owner of the product code. It is useful when a product -// code owner needs to verify whether another user's instance is eligible for -// support. +// code owner must verify whether another user's instance is eligible for support. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2613,9 +2611,9 @@ func (c *EC2) CreateCustomerGatewayRequest(input *CreateCustomerGatewayInput) (r // the exception of 7224, which is reserved in the us-east-1 region, and 9059, // which is reserved in the eu-west-1 region. // -// For more information about VPN customer gateways, see Adding a Hardware Virtual -// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) -// in the Amazon Virtual Private Cloud User Guide. +// For more information about VPN customer gateways, see AWS Managed VPN Connections +// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) in the +// Amazon Virtual Private Cloud User Guide. // // You cannot create more than one customer gateway with the same VPN type, // IP address, and BGP ASN parameter values. If you run an identical request @@ -3867,8 +3865,8 @@ func (c *EC2) CreatePlacementGroupRequest(input *CreatePlacementGroupInput) (req // CreatePlacementGroup API operation for Amazon Elastic Compute Cloud. // -// Creates a placement group that you launch cluster instances into. You must -// give the group a name that's unique within the scope of your account. +// Creates a placement group that you launch cluster instances into. Give the +// group a name that's unique within the scope of your account. // // For more information about placement groups and cluster instances, see Cluster // Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using_cluster_computing.html) @@ -5041,8 +5039,7 @@ func (c *EC2) CreateVpnConnectionRequest(input *CreateVpnConnectionInput) (req * // This is an idempotent operation. If you perform the operation more than once, // Amazon EC2 doesn't return an error. // -// For more information about VPN connections, see Adding a Hardware Virtual -// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// For more information, see AWS Managed VPN Connections (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5124,9 +5121,9 @@ func (c *EC2) CreateVpnConnectionRouteRequest(input *CreateVpnConnectionRouteInp // traffic to be routed from the virtual private gateway to the VPN customer // gateway. // -// For more information about VPN connections, see Adding a Hardware Virtual -// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) -// in the Amazon Virtual Private Cloud User Guide. +// For more information about VPN connections, see AWS Managed VPN Connections +// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) in the +// Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5204,8 +5201,8 @@ func (c *EC2) CreateVpnGatewayRequest(input *CreateVpnGatewayInput) (req *reques // on the VPC side of your VPN connection. You can create a virtual private // gateway before creating the VPC itself. 
// -// For more information about virtual private gateways, see Adding a Hardware -// Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// For more information about virtual private gateways, see AWS Managed VPN +// Connections (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -7959,9 +7956,9 @@ func (c *EC2) DescribeCustomerGatewaysRequest(input *DescribeCustomerGatewaysInp // // Describes one or more of your VPN customer gateways. // -// For more information about VPN customer gateways, see Adding a Hardware Virtual -// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) -// in the Amazon Virtual Private Cloud User Guide. +// For more information about VPN customer gateways, see AWS Managed VPN Connections +// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) in the +// Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -13574,9 +13571,9 @@ func (c *EC2) DescribeVpnConnectionsRequest(input *DescribeVpnConnectionsInput) // // Describes one or more of your VPN connections. // -// For more information about VPN connections, see Adding a Hardware Virtual -// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) -// in the Amazon Virtual Private Cloud User Guide. +// For more information about VPN connections, see AWS Managed VPN Connections +// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) in the +// Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -13652,8 +13649,8 @@ func (c *EC2) DescribeVpnGatewaysRequest(input *DescribeVpnGatewaysInput) (req * // // Describes one or more of your virtual private gateways. // -// For more information about virtual private gateways, see Adding an IPsec -// Hardware VPN to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// For more information about virtual private gateways, see AWS Managed VPN +// Connections (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -15077,7 +15074,7 @@ func (c *EC2) GetConsoleOutputRequest(input *GetConsoleOutputInput) (req *reques // the Amazon EC2 API and command line interface. // // Instance console output is buffered and posted shortly after instance boot, -// reboot, and termination. Amazon EC2 preserves the most recent 64 KB output +// reboot, and termination. Amazon EC2 preserves the most recent 64 KB output, // which is available for at least one hour after the most recent post. // // For Linux instances, the instance console output displays the exact console @@ -15406,9 +15403,10 @@ func (c *EC2) GetReservedInstancesExchangeQuoteRequest(input *GetReservedInstanc // GetReservedInstancesExchangeQuote API operation for Amazon Elastic Compute Cloud. // -// Returns details about the values and term of your specified Convertible Reserved -// Instances. 
When a target configuration is specified, it returns information -// about whether the exchange is valid and can be performed. +// Returns a quote and exchange information for exchanging one or more specified +// Convertible Reserved Instances for a new Convertible Reserved Instance. If +// the exchange cannot be performed, the reason is returned in the response. +// Use AcceptReservedInstancesExchangeQuote to perform the exchange. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -16209,15 +16207,15 @@ func (c *EC2) ModifyImageAttributeRequest(input *ModifyImageAttributeInput) (req // ModifyImageAttribute API operation for Amazon Elastic Compute Cloud. // // Modifies the specified attribute of the specified AMI. You can specify only -// one attribute at a time. +// one attribute at a time. You can use the Attribute parameter to specify the +// attribute or one of the following parameters: Description, LaunchPermission, +// or ProductCode. // // AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace // product code cannot be made public. // -// The SriovNetSupport enhanced networking attribute cannot be changed using -// this command. Instead, enable SriovNetSupport on an instance and create an -// AMI from the instance. This will result in an image with SriovNetSupport -// enabled. +// To enable the SriovNetSupport enhanced networking attribute of an image, +// enable SriovNetSupport on an instance and create an AMI from the instance. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -16542,9 +16540,9 @@ func (c *EC2) ModifyReservedInstancesRequest(input *ModifyReservedInstancesInput // ModifyReservedInstances API operation for Amazon Elastic Compute Cloud. // // Modifies the Availability Zone, instance count, instance type, or network -// platform (EC2-Classic or EC2-VPC) of your Standard Reserved Instances. The -// Reserved Instances to be modified must be identical, except for Availability -// Zone, network platform, and instance type. +// platform (EC2-Classic or EC2-VPC) of your Reserved Instances. The Reserved +// Instances to be modified must be identical, except for Availability Zone, +// network platform, and instance type. // // For more information, see Modifying Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) // in the Amazon Elastic Compute Cloud User Guide. @@ -17270,6 +17268,89 @@ func (c *EC2) ModifyVpcPeeringConnectionOptionsWithContext(ctx aws.Context, inpu return out, req.Send() } +const opModifyVpcTenancy = "ModifyVpcTenancy" + +// ModifyVpcTenancyRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpcTenancy operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVpcTenancy for more information on using the ModifyVpcTenancy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the ModifyVpcTenancyRequest method. +// req, resp := client.ModifyVpcTenancyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcTenancy +func (c *EC2) ModifyVpcTenancyRequest(input *ModifyVpcTenancyInput) (req *request.Request, output *ModifyVpcTenancyOutput) { + op := &request.Operation{ + Name: opModifyVpcTenancy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpcTenancyInput{} + } + + output = &ModifyVpcTenancyOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVpcTenancy API operation for Amazon Elastic Compute Cloud. +// +// Modifies the instance tenancy attribute of the specified VPC. You can change +// the instance tenancy attribute of a VPC to default only. You cannot change +// the instance tenancy attribute to dedicated. +// +// After you modify the tenancy of the VPC, any new instances that you launch +// into the VPC have a tenancy of default, unless you specify otherwise during +// launch. The tenancy of any existing instances in the VPC is not affected. +// +// For more information about Dedicated Instances, see Dedicated Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-instance.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpcTenancy for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcTenancy +func (c *EC2) ModifyVpcTenancy(input *ModifyVpcTenancyInput) (*ModifyVpcTenancyOutput, error) { + req, out := c.ModifyVpcTenancyRequest(input) + return out, req.Send() +} + +// ModifyVpcTenancyWithContext is the same as ModifyVpcTenancy with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpcTenancy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpcTenancyWithContext(ctx aws.Context, input *ModifyVpcTenancyInput, opts ...request.Option) (*ModifyVpcTenancyOutput, error) { + req, out := c.ModifyVpcTenancyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opMonitorInstances = "MonitorInstances" // MonitorInstancesRequest generates a "aws/request.Request" representing the @@ -19490,8 +19571,8 @@ func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Reques // not subscribed, the request fails. // // To ensure faster instance launches, break up large requests into smaller -// batches. For example, create 5 separate launch requests for 100 instances -// each instead of 1 launch request for 500 instances. +// batches. For example, create five separate launch requests for 100 instances +// each instead of one launch request for 500 instances. // // An instance is ready for you to use when it's in the running state. 
You can // check the state of your instance using DescribeInstances. You can tag instances @@ -19665,16 +19746,20 @@ func (c *EC2) StartInstancesRequest(input *StartInstancesInput) (req *request.Re // StartInstances API operation for Amazon Elastic Compute Cloud. // -// Starts an Amazon EBS-backed AMI that you've previously stopped. +// Starts an Amazon EBS-backed instance that you've previously stopped. // // Instances that use Amazon EBS volumes as their root devices can be quickly // stopped and started. When an instance is stopped, the compute resources are -// released and you are not billed for hourly instance usage. However, your -// root partition Amazon EBS volume remains, continues to persist your data, -// and you are charged for Amazon EBS volume usage. You can restart your instance -// at any time. Each time you transition an instance from stopped to started, -// Amazon EC2 charges a full instance hour, even if transitions happen multiple -// times within a single hour. +// released and you are not billed for instance usage. However, your root partition +// Amazon EBS volume remains and continues to persist your data, and you are +// charged for Amazon EBS volume usage. You can restart your instance at any +// time. Every time you start your Windows instance, Amazon EC2 charges you +// for a full instance hour. If you stop and restart your Windows instance, +// a new instance hour begins and Amazon EC2 charges you for another full instance +// hour even if you are still within the same 60-minute period when it was stopped. +// Every time you start your Linux instance, Amazon EC2 charges a one-minute +// minimum for instance usage, and thereafter charges per second for instance +// usage. // // Before stopping an instance, make sure it is in a state from which it can // be restarted. Stopping an instance does not preserve data stored in RAM. @@ -19759,14 +19844,17 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ // // Stops an Amazon EBS-backed instance. // -// We don't charge hourly usage for a stopped instance, or data transfer fees; -// however, your root partition Amazon EBS volume remains, continues to persist -// your data, and you are charged for Amazon EBS volume usage. Each time you -// transition an instance from stopped to started, Amazon EC2 charges a full -// instance hour, even if transitions happen multiple times within a single -// hour. -// -// You can't start or stop Spot instances, and you can't stop instance store-backed +// We don't charge usage for a stopped instance, or data transfer fees; however, +// your root partition Amazon EBS volume remains and continues to persist your +// data, and you are charged for Amazon EBS volume usage. Every time you start +// your Windows instance, Amazon EC2 charges you for a full instance hour. If +// you stop and restart your Windows instance, a new instance hour begins and +// Amazon EC2 charges you for another full instance hour even if you are still +// within the same 60-minute period when it was stopped. Every time you start +// your Linux instance, Amazon EC2 charges a one-minute minimum for instance +// usage, and thereafter charges per second for instance usage. +// +// You can't start or stop Spot Instances, and you can't stop instance store-backed // instances. // // When you stop an instance, we shut it down. You can restart your instance @@ -20311,14 +20399,14 @@ type AcceptReservedInstancesExchangeQuoteInput struct { // it is UnauthorizedOperation. 
DryRun *bool `type:"boolean"` - // The IDs of the Convertible Reserved Instances to exchange for other Convertible - // Reserved Instances of the same or higher value. + // The IDs of the Convertible Reserved Instances to exchange for another Convertible + // Reserved Instance of the same or higher value. // // ReservedInstanceIds is a required field ReservedInstanceIds []*string `locationName:"ReservedInstanceId" locationNameList:"ReservedInstanceId" type:"list" required:"true"` - // The configurations of the Convertible Reserved Instance offerings that you - // are purchasing in this exchange. + // The configuration of the target Convertible Reserved Instance to exchange + // for your current Convertible Reserved Instances. TargetConfigurations []*TargetConfigurationRequest `locationName:"TargetConfiguration" locationNameList:"TargetConfigurationRequest" type:"list"` } @@ -22134,8 +22222,7 @@ func (s *AttributeValue) SetValue(v string) *AttributeValue { type AuthorizeSecurityGroupEgressInput struct { _ struct{} `type:"structure"` - // The CIDR IPv4 address range. We recommend that you specify the CIDR range - // in a set of IP permissions instead. + // Not supported. Use a set of IP permissions to specify the CIDR. CidrIp *string `locationName:"cidrIp" type:"string"` // Checks whether you have the required permissions for the action, without @@ -22144,8 +22231,7 @@ type AuthorizeSecurityGroupEgressInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // The start of port range for the TCP and UDP protocols, or an ICMP type number. - // We recommend that you specify the port range in a set of IP permissions instead. + // Not supported. Use a set of IP permissions to specify the port. FromPort *int64 `locationName:"fromPort" type:"integer"` // The ID of the security group. @@ -22153,26 +22239,23 @@ type AuthorizeSecurityGroupEgressInput struct { // GroupId is a required field GroupId *string `locationName:"groupId" type:"string" required:"true"` - // A set of IP permissions. You can't specify a destination security group and - // a CIDR IP address range. + // One or more sets of IP permissions. You can't specify a destination security + // group and a CIDR IP address range in the same set of permissions. IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"` - // The IP protocol name or number. We recommend that you specify the protocol - // in a set of IP permissions instead. + // Not supported. Use a set of IP permissions to specify the protocol name or + // number. IpProtocol *string `locationName:"ipProtocol" type:"string"` - // The name of a destination security group. To authorize outbound access to - // a destination security group, we recommend that you use a set of IP permissions - // instead. + // Not supported. Use a set of IP permissions to specify a destination security + // group. SourceSecurityGroupName *string `locationName:"sourceSecurityGroupName" type:"string"` - // The AWS account number for a destination security group. To authorize outbound - // access to a destination security group, we recommend that you use a set of - // IP permissions instead. + // Not supported. Use a set of IP permissions to specify a destination security + // group. SourceSecurityGroupOwnerId *string `locationName:"sourceSecurityGroupOwnerId" type:"string"` - // The end of port range for the TCP and UDP protocols, or an ICMP type number. 
- // We recommend that you specify the port range in a set of IP permissions instead. + // Not supported. Use a set of IP permissions to specify the port. ToPort *int64 `locationName:"toPort" type:"integer"` } @@ -22297,8 +22380,8 @@ type AuthorizeSecurityGroupIngressInput struct { // either the security group ID or the security group name in the request. GroupName *string `type:"string"` - // A set of IP permissions. Can be used to specify multiple rules in a single - // command. + // One or more sets of IP permissions. Can be used to specify multiple rules + // in a single command. IpPermissions []*IpPermission `locationNameList:"item" type:"list"` // The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). @@ -22318,8 +22401,8 @@ type AuthorizeSecurityGroupIngressInput struct { // be in the same VPC. SourceSecurityGroupName *string `type:"string"` - // [EC2-Classic] The AWS account number for the source security group, if the - // source security group is in a different account. You can't specify this parameter + // [EC2-Classic] The AWS account ID for the source security group, if the source + // security group is in a different account. You can't specify this parameter // in combination with the following parameters: the CIDR IP address range, // the IP protocol, the start of the port range, and the end of the port range. // Creates rules that grant full ICMP, UDP, and TCP access. To create a rule @@ -27360,11 +27443,7 @@ type CreateVpnConnectionInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // Indicates whether the VPN connection requires static routes. If you are creating - // a VPN connection for a device that does not support BGP, you must specify - // true. - // - // Default: false + // The options for the VPN connection. Options *VpnConnectionOptionsSpecification `locationName:"options" type:"structure"` // The type of VPN connection (ipsec.1). @@ -27536,6 +27615,13 @@ func (s CreateVpnConnectionRouteOutput) GoString() string { type CreateVpnGatewayInput struct { _ struct{} `type:"structure"` + // A private Autonomous System Number (ASN) for the Amazon side of a BGP session. + // If you're using a 16-bit ASN, it must be in the 64512 to 65534 range. If + // you're using a 32-bit ASN, it must be in the 4200000000 to 4294967294 range. + // + // Default: 64512 + AmazonSideAsn *int64 `type:"long"` + // The Availability Zone for the virtual private gateway. AvailabilityZone *string `type:"string"` @@ -27574,6 +27660,12 @@ func (s *CreateVpnGatewayInput) Validate() error { return nil } +// SetAmazonSideAsn sets the AmazonSideAsn field's value. +func (s *CreateVpnGatewayInput) SetAmazonSideAsn(v int64) *CreateVpnGatewayInput { + s.AmazonSideAsn = &v + return s +} + // SetAvailabilityZone sets the AvailabilityZone field's value. func (s *CreateVpnGatewayInput) SetAvailabilityZone(v string) *CreateVpnGatewayInput { s.AvailabilityZone = &v @@ -32156,7 +32248,7 @@ type DescribeInstanceAttributeOutput struct { // EC2 console, CLI, or API; otherwise, you can. DisableApiTermination *AttributeBooleanValue `locationName:"disableApiTermination" type:"structure"` - // Indicates whether the instance is optimized for EBS I/O. + // Indicates whether the instance is optimized for Amazon EBS I/O. EbsOptimized *AttributeBooleanValue `locationName:"ebsOptimized" type:"structure"` // Indicates whether enhanced networking with ENA is enabled. 
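The hunk above vendors the new AmazonSideAsn field on CreateVpnGatewayInput together with its generated SetAmazonSideAsn setter. A minimal sketch of how a caller might populate it, assuming the vendored SDK import paths used elsewhere in this repository; the gateway type, Availability Zone, and ASN values are illustrative placeholders, not taken from this patch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Build a CreateVpnGatewayInput using the AmazonSideAsn field added in this
	// vendored SDK update. No API call is made; this only shows the request shape.
	input := &ec2.CreateVpnGatewayInput{
		Type:             aws.String("ipsec.1"),    // VPN type
		AvailabilityZone: aws.String("us-west-2a"), // hypothetical Availability Zone
	}
	input.SetAmazonSideAsn(64620) // 16-bit private ASN; must fall in the 64512-65534 range

	fmt.Println(input.String())
}

The same generated-setter pattern applies to the other types added later in this patch, such as ModifyVpcTenancyInput and VpnTunnelOptionsSpecification.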
@@ -32188,8 +32280,8 @@ type DescribeInstanceAttributeOutput struct { RootDeviceName *AttributeValue `locationName:"rootDeviceName" type:"structure"` // Indicates whether source/destination checking is enabled. A value of true - // means checking is enabled, and false means checking is disabled. This value - // must be false for a NAT instance to perform NAT. + // means that checking is enabled, and false means that checking is disabled. + // This value must be false for a NAT instance to perform NAT. SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` // Indicates whether enhanced networking with the Intel 82599 Virtual Function @@ -32620,10 +32712,10 @@ type DescribeInstancesInput struct { // | in-use). // // * network-interface.source-dest-check - Whether the network interface - // performs source/destination checking. A value of true means checking is - // enabled, and false means checking is disabled. The value must be false - // for the network interface to perform network address translation (NAT) - // in your VPC. + // performs source/destination checking. A value of true means that checking + // is enabled, and false means that checking is disabled. The value must + // be false for the network interface to perform network address translation + // (NAT) in your VPC. // // * network-interface.subnet-id - The ID of the subnet for the network interface. // @@ -32658,9 +32750,9 @@ type DescribeInstancesInput struct { // ID is created any time you launch an instance. A reservation ID has a // one-to-one relationship with an instance launch request, but can be associated // with more than one instance if you launch multiple instances using the - // same launch request. For example, if you launch one instance, you'll get + // same launch request. For example, if you launch one instance, you get // one reservation ID. If you launch ten instances using the same launch - // request, you'll also get one reservation ID. + // request, you also get one reservation ID. // // * root-device-name - The name of the root device for the instance (for // example, /dev/sda1 or /dev/xvda). @@ -32670,10 +32762,10 @@ type DescribeInstancesInput struct { // // * source-dest-check - Indicates whether the instance performs source/destination // checking. A value of true means that checking is enabled, and false means - // checking is disabled. The value must be false for the instance to perform - // network address translation (NAT) in your VPC. + // that checking is disabled. The value must be false for the instance to + // perform network address translation (NAT) in your VPC. // - // * spot-instance-request-id - The ID of the Spot instance request. + // * spot-instance-request-id - The ID of the Spot Instance request. // // * state-reason-code - The reason code for the state change. // @@ -32690,9 +32782,8 @@ type DescribeInstancesInput struct { // independent of the tag-value filter. For example, if you use both the // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. + // is), and the tag value X (regardless of the tag's key). If you want to + // list only resources where Purpose is X, see the tag:key=value filter. // // * tag-value - The value of a tag assigned to the resource. 
This filter // is independent of the tag-key filter. @@ -35133,6 +35224,14 @@ type DescribeSecurityGroupsInput struct { // // Default: Describes all your security groups. GroupNames []*string `locationName:"GroupName" locationNameList:"GroupName" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another request with the returned NextToken value. + // This value can be between 5 and 1000. + MaxResults *int64 `type:"integer"` + + // The token to request the next page of results. + NextToken *string `type:"string"` } // String returns the string representation @@ -35169,11 +35268,27 @@ func (s *DescribeSecurityGroupsInput) SetGroupNames(v []*string) *DescribeSecuri return s } +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeSecurityGroupsInput) SetMaxResults(v int64) *DescribeSecurityGroupsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSecurityGroupsInput) SetNextToken(v string) *DescribeSecurityGroupsInput { + s.NextToken = &v + return s +} + // Contains the output of DescribeSecurityGroups. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSecurityGroupsResult type DescribeSecurityGroupsOutput struct { _ struct{} `type:"structure"` + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + // Information about one or more security groups. SecurityGroups []*SecurityGroup `locationName:"securityGroupInfo" locationNameList:"item" type:"list"` } @@ -35188,6 +35303,12 @@ func (s DescribeSecurityGroupsOutput) GoString() string { return s.String() } +// SetNextToken sets the NextToken field's value. +func (s *DescribeSecurityGroupsOutput) SetNextToken(v string) *DescribeSecurityGroupsOutput { + s.NextToken = &v + return s +} + // SetSecurityGroups sets the SecurityGroups field's value. func (s *DescribeSecurityGroupsOutput) SetSecurityGroups(v []*SecurityGroup) *DescribeSecurityGroupsOutput { s.SecurityGroups = v @@ -35961,34 +36082,34 @@ type DescribeSpotInstanceRequestsInput struct { // // * launch.key-name - The name of the key pair the instance launched with. // - // * launch.monitoring-enabled - Whether monitoring is enabled for the Spot - // instance. + // * launch.monitoring-enabled - Whether detailed monitoring is enabled for + // the Spot instance. // // * launch.ramdisk-id - The RAM disk ID. // - // * network-interface.network-interface-id - The ID of the network interface. + // * launched-availability-zone - The Availability Zone in which the bid + // is launched. // - // * network-interface.device-index - The index of the device for the network - // interface attachment on the instance. + // * network-interface.addresses.primary - Indicates whether the IP address + // is the primary private IP address. // - // * network-interface.subnet-id - The ID of the subnet for the instance. + // * network-interface.delete-on-termination - Indicates whether the network + // interface is deleted when the instance is terminated. // // * network-interface.description - A description of the network interface. // - // * network-interface.private-ip-address - The primary private IP address - // of the network interface. - // - // * network-interface.delete-on-termination - Indicates whether the network - // interface is deleted when the instance is terminated. 
+ // * network-interface.device-index - The index of the device for the network + // interface attachment on the instance. // // * network-interface.group-id - The ID of the security group associated // with the network interface. // - // * network-interface.group-name - The name of the security group associated - // with the network interface. + // * network-interface.network-interface-id - The ID of the network interface. // - // * network-interface.addresses.primary - Indicates whether the IP address - // is the primary private IP address. + // * network-interface.private-ip-address - The primary private IP address + // of the network interface. + // + // * network-interface.subnet-id - The ID of the subnet for the instance. // // * product-description - The product description associated with the instance // (Linux/UNIX | Windows). @@ -36028,9 +36149,6 @@ type DescribeSpotInstanceRequestsInput struct { // // * type - The type of Spot instance request (one-time | persistent). // - // * launched-availability-zone - The Availability Zone in which the bid - // is launched. - // // * valid-from - The start date of the request. // // * valid-until - The end date of the request. @@ -37947,6 +38065,9 @@ type DescribeVpnGatewaysInput struct { // One or more filters. // + // * amazon-side-asn - The Autonomous System Number (ASN) for the Amazon + // side of the gateway. + // // * attachment.state - The current state of the attachment between the gateway // and the VPC (attaching | attached | detaching | detached). // @@ -40889,7 +41010,7 @@ type GetHostReservationPurchasePreviewOutput struct { // The purchase information of the Dedicated Host Reservation and the Dedicated // Hosts associated with it. - Purchase []*Purchase `locationName:"purchase" type:"list"` + Purchase []*Purchase `locationName:"purchase" locationNameList:"item" type:"list"` // The potential total hourly price of the reservation per hour. TotalHourlyPrice *string `locationName:"totalHourlyPrice" type:"string"` @@ -41044,7 +41165,7 @@ type GetReservedInstancesExchangeQuoteInput struct { // ReservedInstanceIds is a required field ReservedInstanceIds []*string `locationName:"ReservedInstanceId" locationNameList:"ReservedInstanceId" type:"list" required:"true"` - // The configuration requirements of the Convertible Reserved Instances to exchange + // The configuration of the target Convertible Reserved Instance to exchange // for your current Convertible Reserved Instances. TargetConfigurations []*TargetConfigurationRequest `locationName:"TargetConfiguration" locationNameList:"TargetConfigurationRequest" type:"list"` } @@ -43417,7 +43538,7 @@ type Instance struct { // The idempotency token you provided when you launched the instance, if applicable. ClientToken *string `locationName:"clientToken" type:"string"` - // Indicates whether the instance is optimized for EBS I/O. This optimization + // Indicates whether the instance is optimized for Amazon EBS I/O. This optimization // provides dedicated throughput to Amazon EBS and an optimized configuration // stack to provide optimal I/O performance. This optimization isn't available // with all instance types. Additional usage charges apply when using an EBS @@ -43442,7 +43563,7 @@ type Instance struct { // The ID of the instance. InstanceId *string `locationName:"instanceId" type:"string"` - // Indicates whether this is a Spot instance or a Scheduled Instance. + // Indicates whether this is a Spot Instance or a Scheduled Instance. 
InstanceLifecycle *string `locationName:"instanceLifecycle" type:"string" enum:"InstanceLifecycleType"` // The instance type. @@ -43474,7 +43595,7 @@ type Instance struct { // DNS hostname can only be used inside the Amazon EC2 network. This name is // not available until the instance enters the running state. // - // [EC2-VPC] The Amazon-provided DNS server will resolve Amazon-provided private + // [EC2-VPC] The Amazon-provided DNS server resolves Amazon-provided private // DNS hostnames if you've enabled DNS resolution and DNS hostnames in your // VPC. If you are not using the Amazon-provided DNS server in your VPC, your // custom domain name servers must resolve the hostname as appropriate. @@ -43509,13 +43630,13 @@ type Instance struct { // Specifies whether to enable an instance launched in a VPC to perform NAT. // This controls whether source/destination checking is enabled on the instance. - // A value of true means checking is enabled, and false means checking is disabled. - // The value must be false for the instance to perform NAT. For more information, - // see NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) + // A value of true means that checking is enabled, and false means that checking + // is disabled. The value must be false for the instance to perform NAT. For + // more information, see NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) // in the Amazon Virtual Private Cloud User Guide. SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` - // If the request is a Spot instance request, the ID of the request. + // If the request is a Spot Instance request, the ID of the request. SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` // Specifies whether enhanced networking with the Intel 82599 Virtual Function @@ -44901,7 +45022,7 @@ func (s *InternetGatewayAttachment) SetVpcId(v string) *InternetGatewayAttachmen return s } -// Describes a security group rule. +// Describes a set of permissions for a security group rule. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/IpPermission type IpPermission struct { _ struct{} `type:"structure"` @@ -45000,7 +45121,7 @@ type IpRange struct { _ struct{} `type:"structure"` // The IPv4 CIDR range. You can either specify a CIDR range or a source security - // group, not both. To specify a single IPv4 address, use the /32 prefix. + // group, not both. To specify a single IPv4 address, use the /32 prefix length. CidrIp *string `locationName:"cidrIp" type:"string"` // A description for the security group rule that references this IPv4 address @@ -45064,7 +45185,7 @@ type Ipv6Range struct { _ struct{} `type:"structure"` // The IPv6 CIDR range. You can either specify a CIDR range or a source security - // group, not both. To specify a single IPv6 address, use the /128 prefix. + // group, not both. To specify a single IPv6 address, use the /128 prefix length. CidrIpv6 *string `locationName:"cidrIpv6" type:"string"` // A description for the security group rule that references this IPv6 address @@ -45212,9 +45333,6 @@ type LaunchSpecification struct { AddressingType *string `locationName:"addressingType" type:"string"` // One or more block device mapping entries. - // - // Although you can specify encrypted EBS volumes in this block device mapping - // for your Spot Instances, these volumes are not encrypted. 
BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` // Indicates whether the instance is optimized for EBS I/O. This optimization @@ -45869,10 +45987,11 @@ func (s ModifyIdentityIdFormatOutput) GoString() string { type ModifyImageAttributeInput struct { _ struct{} `type:"structure"` - // The name of the attribute to modify. + // The name of the attribute to modify. The valid values are description, launchPermission, + // and productCodes. Attribute *string `type:"string"` - // A description for the AMI. + // A new description for the AMI. Description *AttributeValue `type:"structure"` // Checks whether you have the required permissions for the action, without @@ -45886,26 +46005,27 @@ type ModifyImageAttributeInput struct { // ImageId is a required field ImageId *string `type:"string" required:"true"` - // A launch permission modification. + // A new launch permission for the AMI. LaunchPermission *LaunchPermissionModifications `type:"structure"` - // The operation type. + // The operation type. This parameter can be used only when the Attribute parameter + // is launchPermission. OperationType *string `type:"string" enum:"OperationType"` - // One or more product codes. After you add a product code to an AMI, it can't - // be removed. This is only valid when modifying the productCodes attribute. + // One or more DevPay product codes. After you add a product code to an AMI, + // it can't be removed. ProductCodes []*string `locationName:"ProductCode" locationNameList:"ProductCode" type:"list"` - // One or more user groups. This is only valid when modifying the launchPermission - // attribute. + // One or more user groups. This parameter can be used only when the Attribute + // parameter is launchPermission. UserGroups []*string `locationName:"UserGroup" locationNameList:"UserGroup" type:"list"` - // One or more AWS account IDs. This is only valid when modifying the launchPermission - // attribute. + // One or more AWS account IDs. This parameter can be used only when the Attribute + // parameter is launchPermission. UserIds []*string `locationName:"UserId" locationNameList:"UserId" type:"list"` - // The value of the attribute being modified. This is only valid when modifying - // the description attribute. + // The value of the attribute being modified. This parameter can be used only + // when the Attribute parameter is description or productCodes. Value *string `type:"string"` } @@ -46037,7 +46157,7 @@ type ModifyInstanceAttributeInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // Specifies whether the instance is optimized for EBS I/O. This optimization + // Specifies whether the instance is optimized for Amazon EBS I/O. This optimization // provides dedicated throughput to Amazon EBS and an optimized configuration // stack to provide optimal EBS I/O performance. This optimization isn't available // with all instance types. Additional usage charges apply when using an EBS @@ -46080,8 +46200,8 @@ type ModifyInstanceAttributeInput struct { Ramdisk *AttributeValue `locationName:"ramdisk" type:"structure"` // Specifies whether source/destination checking is enabled. A value of true - // means that checking is enabled, and false means checking is disabled. This - // value must be false for a NAT instance to perform NAT. + // means that checking is enabled, and false means that checking is disabled. + // This value must be false for a NAT instance to perform NAT. 
SourceDestCheck *AttributeBooleanValue `type:"structure"` // Set to simple to enable enhanced networking with the Intel 82599 Virtual @@ -46095,8 +46215,8 @@ type ModifyInstanceAttributeInput struct { SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` // Changes the instance's user data to the specified value. If you are using - // an AWS SDK or command line tool, Base64-encoding is performed for you, and - // you can load the text from a file. Otherwise, you must provide Base64-encoded + // an AWS SDK or command line tool, base64-encoding is performed for you, and + // you can load the text from a file. Otherwise, you must provide base64-encoded // text. UserData *BlobAttributeValue `locationName:"userData" type:"structure"` @@ -47307,6 +47427,97 @@ func (s *ModifyVpcPeeringConnectionOptionsOutput) SetRequesterPeeringConnectionO return s } +// Contains the parameters for ModifyVpcTenancy. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcTenancyRequest +type ModifyVpcTenancyInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the operation, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The instance tenancy attribute for the VPC. + // + // InstanceTenancy is a required field + InstanceTenancy *string `type:"string" required:"true" enum:"VpcTenancy"` + + // The ID of the VPC. + // + // VpcId is a required field + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpcTenancyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcTenancyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVpcTenancyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpcTenancyInput"} + if s.InstanceTenancy == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceTenancy")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVpcTenancyInput) SetDryRun(v bool) *ModifyVpcTenancyInput { + s.DryRun = &v + return s +} + +// SetInstanceTenancy sets the InstanceTenancy field's value. +func (s *ModifyVpcTenancyInput) SetInstanceTenancy(v string) *ModifyVpcTenancyInput { + s.InstanceTenancy = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *ModifyVpcTenancyInput) SetVpcId(v string) *ModifyVpcTenancyInput { + s.VpcId = &v + return s +} + +// Contains the output of ModifyVpcTenancy. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcTenancyResult +type ModifyVpcTenancyOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, returns an error. 
+ ReturnValue *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifyVpcTenancyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcTenancyOutput) GoString() string { + return s.String() +} + +// SetReturnValue sets the ReturnValue field's value. +func (s *ModifyVpcTenancyOutput) SetReturnValue(v bool) *ModifyVpcTenancyOutput { + s.ReturnValue = &v + return s +} + // Contains the parameters for MonitorInstances. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/MonitorInstancesRequest type MonitorInstancesInput struct { @@ -49423,7 +49634,7 @@ type PurchaseHostReservationOutput struct { CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` // Describes the details of the purchase. - Purchase []*Purchase `locationName:"purchase" type:"list"` + Purchase []*Purchase `locationName:"purchase" locationNameList:"item" type:"list"` // The total hourly price of the reservation calculated per hour. TotalHourlyPrice *string `locationName:"totalHourlyPrice" type:"string"` @@ -50867,7 +51078,7 @@ type ReportInstanceStatusInput struct { // Instances is a required field Instances []*string `locationName:"instanceId" locationNameList:"InstanceId" type:"list" required:"true"` - // One or more reason codes that describes the health state of your instance. + // One or more reason codes that describe the health state of your instance. // // * instance-stuck-in-state: My instance is stuck in a state. // @@ -50878,13 +51089,13 @@ type ReportInstanceStatusInput struct { // * password-not-available: A password is not available for my instance. // // * performance-network: My instance is experiencing performance problems - // which I believe are network related. + // that I believe are network related. // // * performance-instance-store: My instance is experiencing performance - // problems which I believe are related to the instance stores. + // problems that I believe are related to the instance stores. // // * performance-ebs-volume: My instance is experiencing performance problems - // which I believe are related to an EBS volume. + // that I believe are related to an EBS volume. // // * performance-other: My instance is experiencing performance problems. // @@ -51302,9 +51513,6 @@ type RequestSpotLaunchSpecification struct { AddressingType *string `locationName:"addressingType" type:"string"` // One or more block device mapping entries. - // - // Although you can specify encrypted EBS volumes in this block device mapping - // for your Spot Instances, these volumes are not encrypted. BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` // Indicates whether the instance is optimized for EBS I/O. This optimization @@ -52838,8 +53046,7 @@ func (s *RestoreAddressToClassicOutput) SetStatus(v string) *RestoreAddressToCla type RevokeSecurityGroupEgressInput struct { _ struct{} `type:"structure"` - // The CIDR IP address range. We recommend that you specify the CIDR range in - // a set of IP permissions instead. + // Not supported. Use a set of IP permissions to specify the CIDR. CidrIp *string `locationName:"cidrIp" type:"string"` // Checks whether you have the required permissions for the action, without @@ -52848,8 +53055,7 @@ type RevokeSecurityGroupEgressInput struct { // it is UnauthorizedOperation. 
DryRun *bool `locationName:"dryRun" type:"boolean"` - // The start of port range for the TCP and UDP protocols, or an ICMP type number. - // We recommend that you specify the port range in a set of IP permissions instead. + // Not supported. Use a set of IP permissions to specify the port. FromPort *int64 `locationName:"fromPort" type:"integer"` // The ID of the security group. @@ -52857,26 +53063,23 @@ type RevokeSecurityGroupEgressInput struct { // GroupId is a required field GroupId *string `locationName:"groupId" type:"string" required:"true"` - // A set of IP permissions. You can't specify a destination security group and - // a CIDR IP address range. + // One or more sets of IP permissions. You can't specify a destination security + // group and a CIDR IP address range in the same set of permissions. IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"` - // The IP protocol name or number. We recommend that you specify the protocol - // in a set of IP permissions instead. + // Not supported. Use a set of IP permissions to specify the protocol name or + // number. IpProtocol *string `locationName:"ipProtocol" type:"string"` - // The name of a destination security group. To revoke outbound access to a - // destination security group, we recommend that you use a set of IP permissions - // instead. + // Not supported. Use a set of IP permissions to specify a destination security + // group. SourceSecurityGroupName *string `locationName:"sourceSecurityGroupName" type:"string"` - // The AWS account number for a destination security group. To revoke outbound - // access to a destination security group, we recommend that you use a set of - // IP permissions instead. + // Not supported. Use a set of IP permissions to specify a destination security + // group. SourceSecurityGroupOwnerId *string `locationName:"sourceSecurityGroupOwnerId" type:"string"` - // The end of port range for the TCP and UDP protocols, or an ICMP type number. - // We recommend that you specify the port range in a set of IP permissions instead. + // Not supported. Use a set of IP permissions to specify the port. ToPort *int64 `locationName:"toPort" type:"integer"` } @@ -52991,15 +53194,17 @@ type RevokeSecurityGroupIngressInput struct { // For the ICMP type number, use -1 to specify all ICMP types. FromPort *int64 `type:"integer"` - // The ID of the security group. Required for a security group in a nondefault - // VPC. + // The ID of the security group. You must specify either the security group + // ID or the security group name in the request. For security groups in a nondefault + // VPC, you must specify the security group ID. GroupId *string `type:"string"` - // [EC2-Classic, default VPC] The name of the security group. + // [EC2-Classic, default VPC] The name of the security group. You must specify + // either the security group ID or the security group name in the request. GroupName *string `type:"string"` - // A set of IP permissions. You can't specify a source security group and a - // CIDR IP address range. + // One or more sets of IP permissions. You can't specify a source security group + // and a CIDR IP address range in the same set of permissions. IpPermissions []*IpPermission `locationNameList:"item" type:"list"` // The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). @@ -53405,11 +53610,11 @@ type RunInstancesInput struct { // it is UnauthorizedOperation. 
DryRun *bool `locationName:"dryRun" type:"boolean"` - // Indicates whether the instance is optimized for EBS I/O. This optimization + // Indicates whether the instance is optimized for Amazon EBS I/O. This optimization // provides dedicated throughput to Amazon EBS and an optimized configuration - // stack to provide optimal EBS I/O performance. This optimization isn't available - // with all instance types. Additional usage charges apply when using an EBS-optimized - // instance. + // stack to provide optimal Amazon EBS I/O performance. This optimization isn't + // available with all instance types. Additional usage charges apply when using + // an EBS-optimized instance. // // Default: false EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` @@ -53536,9 +53741,9 @@ type RunInstancesInput struct { // The user data to make available to the instance. For more information, see // Running Commands on Your Linux Instance at Launch (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) // (Linux) and Adding User Data (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data) - // (Windows). If you are using an AWS SDK or command line tool, Base64-encoding - // is performed for you, and you can load the text from a file. Otherwise, you - // must provide Base64-encoded text. + // (Windows). If you are using a command line tool, base64-encoding is performed + // for you, and you can load the text from a file. Otherwise, you must provide + // base64-encoded text. UserData *string `type:"string"` } @@ -56909,7 +57114,7 @@ type StateReason struct { // // * Server.ScheduledStop: The instance was stopped due to a scheduled retirement. // - // * Server.SpotInstanceTermination: A Spot instance was terminated due to + // * Server.SpotInstanceTermination: A Spot Instance was terminated due to // an increase in the market price. // // * Client.InternalError: A client error caused the instance to terminate @@ -59598,6 +59803,12 @@ func (s *VpcPeeringConnectionVpcInfo) SetVpcId(v string) *VpcPeeringConnectionVp type VpnConnection struct { _ struct{} `type:"structure"` + // The category of the VPN connection. A value of VPN indicates an AWS VPN connection. + // A value of VPN-Classic indicates an AWS Classic VPN connection. For more + // information, see AWS Managed VPN Categories (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html#vpn-categories) + // in the Amazon Virtual Private Cloud User Guide. + Category *string `locationName:"category" type:"string"` + // The configuration information for the VPN connection's customer gateway (in // the native XML format). This element is always present in the CreateVpnConnection // response; however, it's present in the DescribeVpnConnections response only @@ -59642,6 +59853,12 @@ func (s VpnConnection) GoString() string { return s.String() } +// SetCategory sets the Category field's value. +func (s *VpnConnection) SetCategory(v string) *VpnConnection { + s.Category = &v + return s +} + // SetCustomerGatewayConfiguration sets the CustomerGatewayConfiguration field's value. func (s *VpnConnection) SetCustomerGatewayConfiguration(v string) *VpnConnection { s.CustomerGatewayConfiguration = &v @@ -59733,9 +59950,15 @@ func (s *VpnConnectionOptions) SetStaticRoutesOnly(v bool) *VpnConnectionOptions type VpnConnectionOptionsSpecification struct { _ struct{} `type:"structure"` - // Indicates whether the VPN connection uses static routes only. 
Static routes - // must be used for devices that don't support BGP. + // Indicate whether the VPN connection uses static routes only. If you are creating + // a VPN connection for a device that does not support BGP, you must specify + // true. + // + // Default: false StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"` + + // The tunnel options for the VPN connection. + TunnelOptions []*VpnTunnelOptionsSpecification `locationNameList:"item" type:"list"` } // String returns the string representation @@ -59754,11 +59977,20 @@ func (s *VpnConnectionOptionsSpecification) SetStaticRoutesOnly(v bool) *VpnConn return s } +// SetTunnelOptions sets the TunnelOptions field's value. +func (s *VpnConnectionOptionsSpecification) SetTunnelOptions(v []*VpnTunnelOptionsSpecification) *VpnConnectionOptionsSpecification { + s.TunnelOptions = v + return s +} + // Describes a virtual private gateway. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpnGateway type VpnGateway struct { _ struct{} `type:"structure"` + // The private Autonomous System Number (ASN) for the Amazon side of a BGP session. + AmazonSideAsn *int64 `locationName:"amazonSideAsn" type:"long"` + // The Availability Zone where the virtual private gateway was created, if applicable. // This field may be empty or not returned. AvailabilityZone *string `locationName:"availabilityZone" type:"string"` @@ -59789,6 +60021,12 @@ func (s VpnGateway) GoString() string { return s.String() } +// SetAmazonSideAsn sets the AmazonSideAsn field's value. +func (s *VpnGateway) SetAmazonSideAsn(v int64) *VpnGateway { + s.AmazonSideAsn = &v + return s +} + // SetAvailabilityZone sets the AvailabilityZone field's value. func (s *VpnGateway) SetAvailabilityZone(v string) *VpnGateway { s.AvailabilityZone = &v @@ -59868,6 +60106,63 @@ func (s *VpnStaticRoute) SetState(v string) *VpnStaticRoute { return s } +// The tunnel options for a VPN connection. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpnTunnelOptionsSpecification +type VpnTunnelOptionsSpecification struct { + _ struct{} `type:"structure"` + + // The pre-shared key (PSK) to establish initial authentication between the + // virtual private gateway and customer gateway. + // + // Constraints: Allowed characters are alphanumeric characters and ._. Must + // be between 8 and 64 characters in length and cannot start with zero (0). + PreSharedKey *string `type:"string"` + + // The range of inside IP addresses for the tunnel. Any specified CIDR blocks + // must be unique across all VPN connections that use the same virtual private + // gateway. + // + // Constraints: A size /30 CIDR block from the 169.254.0.0/16 range. The following + // CIDR blocks are reserved and cannot be used: + // + // * 169.254.0.0/30 + // + // * 169.254.1.0/30 + // + // * 169.254.2.0/30 + // + // * 169.254.3.0/30 + // + // * 169.254.4.0/30 + // + // * 169.254.5.0/30 + // + // * 169.254.169.252/30 + TunnelInsideCidr *string `type:"string"` +} + +// String returns the string representation +func (s VpnTunnelOptionsSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnTunnelOptionsSpecification) GoString() string { + return s.String() +} + +// SetPreSharedKey sets the PreSharedKey field's value. +func (s *VpnTunnelOptionsSpecification) SetPreSharedKey(v string) *VpnTunnelOptionsSpecification { + s.PreSharedKey = &v + return s +} + +// SetTunnelInsideCidr sets the TunnelInsideCidr field's value. 
+func (s *VpnTunnelOptionsSpecification) SetTunnelInsideCidr(v string) *VpnTunnelOptionsSpecification { + s.TunnelInsideCidr = &v + return s +} + const ( // AccountAttributeNameSupportedPlatforms is a AccountAttributeName enum value AccountAttributeNameSupportedPlatforms = "supported-platforms" @@ -60630,6 +60925,15 @@ const ( // InstanceTypeP216xlarge is a InstanceType enum value InstanceTypeP216xlarge = "p2.16xlarge" + // InstanceTypeP32xlarge is a InstanceType enum value + InstanceTypeP32xlarge = "p3.2xlarge" + + // InstanceTypeP38xlarge is a InstanceType enum value + InstanceTypeP38xlarge = "p3.8xlarge" + + // InstanceTypeP316xlarge is a InstanceType enum value + InstanceTypeP316xlarge = "p3.16xlarge" + // InstanceTypeD2Xlarge is a InstanceType enum value InstanceTypeD2Xlarge = "d2.xlarge" @@ -61368,6 +61672,11 @@ const ( VpcStateAvailable = "available" ) +const ( + // VpcTenancyDefault is a VpcTenancy enum value + VpcTenancyDefault = "default" +) + const ( // VpnStatePending is a VpnState enum value VpnStatePending = "pending" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go index 195d9b55b8d..08ff2cf0b67 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations_test.go @@ -3,22 +3,27 @@ package ec2_test import ( "io/ioutil" "net/url" + "regexp" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/awstesting/unit" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/stretchr/testify/assert" ) func TestCopySnapshotPresignedURL(t *testing.T) { svc := ec2.New(unit.Session, &aws.Config{Region: aws.String("us-west-2")}) - assert.NotPanics(t, func() { + func() { + defer func() { + if r := recover(); r != nil { + t.Fatalf("expect CopySnapshotRequest with nill") + } + }() // Doesn't panic on nil input req, _ := svc.CopySnapshotRequest(nil) req.Sign() - }) + }() req, _ := svc.CopySnapshotRequest(&ec2.CopySnapshotInput{ SourceRegion: aws.String("us-west-1"), @@ -29,7 +34,15 @@ func TestCopySnapshotPresignedURL(t *testing.T) { b, _ := ioutil.ReadAll(req.HTTPRequest.Body) q, _ := url.ParseQuery(string(b)) u, _ := url.QueryUnescape(q.Get("PresignedUrl")) - assert.Equal(t, "us-west-2", q.Get("DestinationRegion")) - assert.Equal(t, "us-west-1", q.Get("SourceRegion")) - assert.Regexp(t, `^https://ec2\.us-west-1\.amazonaws\.com/.+&DestinationRegion=us-west-2`, u) + if e, a := "us-west-2", q.Get("DestinationRegion"); e != a { + t.Errorf("expect %v, got %v", e, a) + } + if e, a := "us-west-1", q.Get("SourceRegion"); e != a { + t.Errorf("expect %v, got %v", e, a) + } + + r := regexp.MustCompile(`^https://ec2\.us-west-1\.amazonaws\.com/.+&DestinationRegion=us-west-2`) + if !r.MatchString(u) { + t.Errorf("expect %v to match, got %v", r.String(), u) + } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go index 6914a666bb7..0469f0f01a1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go @@ -1025,6 +1025,11 @@ func (c *EC2) WaitUntilSpotInstanceRequestFulfilledWithContext(ctx aws.Context, Matcher: request.PathAllWaiterMatch, Argument: "SpotInstanceRequests[].Status.Code", Expected: "fulfilled", }, + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "SpotInstanceRequests[].Status.Code", + Expected: 
"request-canceled-and-instance-running", + }, { State: request.FailureWaiterState, Matcher: request.PathAnyWaiterMatch, Argument: "SpotInstanceRequests[].Status.Code", diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go index f9136852fda..5938049e376 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go @@ -329,9 +329,9 @@ func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req * // CompleteLayerUpload API operation for Amazon EC2 Container Registry. // -// Inform Amazon ECR that the image layer upload for a specified registry, repository -// name, and upload ID, has completed. You can optionally provide a sha256 digest -// of the image layer for data validation purposes. +// Informs Amazon ECR that the image layer upload has completed for a specified +// registry, repository name, and upload ID. You can optionally provide a sha256 +// digest of the image layer for data validation purposes. // // This operation is used by the Amazon ECR proxy, and it is not intended for // general use by customers for pulling and pushing images. In most cases, you @@ -487,6 +487,96 @@ func (c *ECR) CreateRepositoryWithContext(ctx aws.Context, input *CreateReposito return out, req.Send() } +const opDeleteLifecyclePolicy = "DeleteLifecyclePolicy" + +// DeleteLifecyclePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLifecyclePolicy operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteLifecyclePolicy for more information on using the DeleteLifecyclePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteLifecyclePolicyRequest method. +// req, resp := client.DeleteLifecyclePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicy +func (c *ECR) DeleteLifecyclePolicyRequest(input *DeleteLifecyclePolicyInput) (req *request.Request, output *DeleteLifecyclePolicyOutput) { + op := &request.Operation{ + Name: opDeleteLifecyclePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLifecyclePolicyInput{} + } + + output = &DeleteLifecyclePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteLifecyclePolicy API operation for Amazon EC2 Container Registry. +// +// Deletes the specified lifecycle policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation DeleteLifecyclePolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. 
+// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ErrCodeLifecyclePolicyNotFoundException "LifecyclePolicyNotFoundException" +// The lifecycle policy could not be found, and no policy is set to the repository. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicy +func (c *ECR) DeleteLifecyclePolicy(input *DeleteLifecyclePolicyInput) (*DeleteLifecyclePolicyOutput, error) { + req, out := c.DeleteLifecyclePolicyRequest(input) + return out, req.Send() +} + +// DeleteLifecyclePolicyWithContext is the same as DeleteLifecyclePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLifecyclePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DeleteLifecyclePolicyWithContext(ctx aws.Context, input *DeleteLifecyclePolicyInput, opts ...request.Option) (*DeleteLifecyclePolicyOutput, error) { + req, out := c.DeleteLifecyclePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteRepository = "DeleteRepository" // DeleteRepositoryRequest generates a "aws/request.Request" representing the @@ -1155,6 +1245,186 @@ func (c *ECR) GetDownloadUrlForLayerWithContext(ctx aws.Context, input *GetDownl return out, req.Send() } +const opGetLifecyclePolicy = "GetLifecyclePolicy" + +// GetLifecyclePolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetLifecyclePolicy operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetLifecyclePolicy for more information on using the GetLifecyclePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetLifecyclePolicyRequest method. +// req, resp := client.GetLifecyclePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicy +func (c *ECR) GetLifecyclePolicyRequest(input *GetLifecyclePolicyInput) (req *request.Request, output *GetLifecyclePolicyOutput) { + op := &request.Operation{ + Name: opGetLifecyclePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetLifecyclePolicyInput{} + } + + output = &GetLifecyclePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetLifecyclePolicy API operation for Amazon EC2 Container Registry. 
+// +// Retrieves the specified lifecycle policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation GetLifecyclePolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ErrCodeLifecyclePolicyNotFoundException "LifecyclePolicyNotFoundException" +// The lifecycle policy could not be found, and no policy is set to the repository. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicy +func (c *ECR) GetLifecyclePolicy(input *GetLifecyclePolicyInput) (*GetLifecyclePolicyOutput, error) { + req, out := c.GetLifecyclePolicyRequest(input) + return out, req.Send() +} + +// GetLifecyclePolicyWithContext is the same as GetLifecyclePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetLifecyclePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) GetLifecyclePolicyWithContext(ctx aws.Context, input *GetLifecyclePolicyInput, opts ...request.Option) (*GetLifecyclePolicyOutput, error) { + req, out := c.GetLifecyclePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetLifecyclePolicyPreview = "GetLifecyclePolicyPreview" + +// GetLifecyclePolicyPreviewRequest generates a "aws/request.Request" representing the +// client's request for the GetLifecyclePolicyPreview operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetLifecyclePolicyPreview for more information on using the GetLifecyclePolicyPreview +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetLifecyclePolicyPreviewRequest method. 
+// req, resp := client.GetLifecyclePolicyPreviewRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreview +func (c *ECR) GetLifecyclePolicyPreviewRequest(input *GetLifecyclePolicyPreviewInput) (req *request.Request, output *GetLifecyclePolicyPreviewOutput) { + op := &request.Operation{ + Name: opGetLifecyclePolicyPreview, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetLifecyclePolicyPreviewInput{} + } + + output = &GetLifecyclePolicyPreviewOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetLifecyclePolicyPreview API operation for Amazon EC2 Container Registry. +// +// Retrieves the results of the specified lifecycle policy preview request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation GetLifecyclePolicyPreview for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ErrCodeLifecyclePolicyPreviewNotFoundException "LifecyclePolicyPreviewNotFoundException" +// There is no dry run for this repository. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreview +func (c *ECR) GetLifecyclePolicyPreview(input *GetLifecyclePolicyPreviewInput) (*GetLifecyclePolicyPreviewOutput, error) { + req, out := c.GetLifecyclePolicyPreviewRequest(input) + return out, req.Send() +} + +// GetLifecyclePolicyPreviewWithContext is the same as GetLifecyclePolicyPreview with the addition of +// the ability to pass a context and additional request options. +// +// See GetLifecyclePolicyPreview for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) GetLifecyclePolicyPreviewWithContext(ctx aws.Context, input *GetLifecyclePolicyPreviewInput, opts ...request.Option) (*GetLifecyclePolicyPreviewOutput, error) { + req, out := c.GetLifecyclePolicyPreviewRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetRepositoryPolicy = "GetRepositoryPolicy" // GetRepositoryPolicyRequest generates a "aws/request.Request" representing the @@ -1556,8 +1826,8 @@ func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, outpu // repository and ensure that you are performing operations on the correct registry. 
// // * ErrCodeImageAlreadyExistsException "ImageAlreadyExistsException" -// The specified image has already been pushed, and there are no changes to -// the manifest or image tag since the last push. +// The specified image has already been pushed, and there were no changes to +// the manifest or image tag after the last push. // // * ErrCodeLayersNotFoundException "LayersNotFoundException" // The specified layers could not be found, or the specified layer is not valid @@ -1591,58 +1861,58 @@ func (c *ECR) PutImageWithContext(ctx aws.Context, input *PutImageInput, opts .. return out, req.Send() } -const opSetRepositoryPolicy = "SetRepositoryPolicy" +const opPutLifecyclePolicy = "PutLifecyclePolicy" -// SetRepositoryPolicyRequest generates a "aws/request.Request" representing the -// client's request for the SetRepositoryPolicy operation. The "output" return +// PutLifecyclePolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutLifecyclePolicy operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See SetRepositoryPolicy for more information on using the SetRepositoryPolicy +// See PutLifecyclePolicy for more information on using the PutLifecyclePolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the SetRepositoryPolicyRequest method. -// req, resp := client.SetRepositoryPolicyRequest(params) +// // Example sending a request using the PutLifecyclePolicyRequest method. +// req, resp := client.PutLifecyclePolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy -func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req *request.Request, output *SetRepositoryPolicyOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicy +func (c *ECR) PutLifecyclePolicyRequest(input *PutLifecyclePolicyInput) (req *request.Request, output *PutLifecyclePolicyOutput) { op := &request.Operation{ - Name: opSetRepositoryPolicy, + Name: opPutLifecyclePolicy, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &SetRepositoryPolicyInput{} + input = &PutLifecyclePolicyInput{} } - output = &SetRepositoryPolicyOutput{} + output = &PutLifecyclePolicyOutput{} req = c.newRequest(op, input, output) return } -// SetRepositoryPolicy API operation for Amazon EC2 Container Registry. +// PutLifecyclePolicy API operation for Amazon EC2 Container Registry. // -// Applies a repository policy on a specified repository to control access permissions. +// Creates or updates a lifecycle policy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon EC2 Container Registry's -// API operation SetRepositoryPolicy for usage and error information. +// API operation PutLifecyclePolicy for usage and error information. 
// // Returned Error Codes: // * ErrCodeServerException "ServerException" @@ -1656,84 +1926,80 @@ func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req * // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy -func (c *ECR) SetRepositoryPolicy(input *SetRepositoryPolicyInput) (*SetRepositoryPolicyOutput, error) { - req, out := c.SetRepositoryPolicyRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicy +func (c *ECR) PutLifecyclePolicy(input *PutLifecyclePolicyInput) (*PutLifecyclePolicyOutput, error) { + req, out := c.PutLifecyclePolicyRequest(input) return out, req.Send() } -// SetRepositoryPolicyWithContext is the same as SetRepositoryPolicy with the addition of +// PutLifecyclePolicyWithContext is the same as PutLifecyclePolicy with the addition of // the ability to pass a context and additional request options. // -// See SetRepositoryPolicy for details on how to use this API operation. +// See PutLifecyclePolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ECR) SetRepositoryPolicyWithContext(ctx aws.Context, input *SetRepositoryPolicyInput, opts ...request.Option) (*SetRepositoryPolicyOutput, error) { - req, out := c.SetRepositoryPolicyRequest(input) +func (c *ECR) PutLifecyclePolicyWithContext(ctx aws.Context, input *PutLifecyclePolicyInput, opts ...request.Option) (*PutLifecyclePolicyOutput, error) { + req, out := c.PutLifecyclePolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUploadLayerPart = "UploadLayerPart" +const opSetRepositoryPolicy = "SetRepositoryPolicy" -// UploadLayerPartRequest generates a "aws/request.Request" representing the -// client's request for the UploadLayerPart operation. The "output" return +// SetRepositoryPolicyRequest generates a "aws/request.Request" representing the +// client's request for the SetRepositoryPolicy operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UploadLayerPart for more information on using the UploadLayerPart +// See SetRepositoryPolicy for more information on using the SetRepositoryPolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UploadLayerPartRequest method. -// req, resp := client.UploadLayerPartRequest(params) +// // Example sending a request using the SetRepositoryPolicyRequest method. 
+// req, resp := client.SetRepositoryPolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart -func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request.Request, output *UploadLayerPartOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy +func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req *request.Request, output *SetRepositoryPolicyOutput) { op := &request.Operation{ - Name: opUploadLayerPart, + Name: opSetRepositoryPolicy, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UploadLayerPartInput{} + input = &SetRepositoryPolicyInput{} } - output = &UploadLayerPartOutput{} + output = &SetRepositoryPolicyOutput{} req = c.newRequest(op, input, output) return } -// UploadLayerPart API operation for Amazon EC2 Container Registry. -// -// Uploads an image layer part to Amazon ECR. +// SetRepositoryPolicy API operation for Amazon EC2 Container Registry. // -// This operation is used by the Amazon ECR proxy, and it is not intended for -// general use by customers for pulling and pushing images. In most cases, you -// should use the docker CLI to pull, tag, and push images. +// Applies a repository policy on a specified repository to control access permissions. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon EC2 Container Registry's -// API operation UploadLayerPart for usage and error information. +// API operation SetRepositoryPolicy for usage and error information. // // Returned Error Codes: // * ErrCodeServerException "ServerException" @@ -1743,8 +2009,194 @@ func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request. // The specified parameter is invalid. Review the available parameters for the // API request. // -// * ErrCodeInvalidLayerPartException "InvalidLayerPartException" -// The layer part size is not valid, or the first byte specified is not consecutive +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy +func (c *ECR) SetRepositoryPolicy(input *SetRepositoryPolicyInput) (*SetRepositoryPolicyOutput, error) { + req, out := c.SetRepositoryPolicyRequest(input) + return out, req.Send() +} + +// SetRepositoryPolicyWithContext is the same as SetRepositoryPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See SetRepositoryPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *ECR) SetRepositoryPolicyWithContext(ctx aws.Context, input *SetRepositoryPolicyInput, opts ...request.Option) (*SetRepositoryPolicyOutput, error) { + req, out := c.SetRepositoryPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartLifecyclePolicyPreview = "StartLifecyclePolicyPreview" + +// StartLifecyclePolicyPreviewRequest generates a "aws/request.Request" representing the +// client's request for the StartLifecyclePolicyPreview operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartLifecyclePolicyPreview for more information on using the StartLifecyclePolicyPreview +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartLifecyclePolicyPreviewRequest method. +// req, resp := client.StartLifecyclePolicyPreviewRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview +func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPreviewInput) (req *request.Request, output *StartLifecyclePolicyPreviewOutput) { + op := &request.Operation{ + Name: opStartLifecyclePolicyPreview, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartLifecyclePolicyPreviewInput{} + } + + output = &StartLifecyclePolicyPreviewOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartLifecyclePolicyPreview API operation for Amazon EC2 Container Registry. +// +// Starts a preview of the specified lifecycle policy. This allows you to see +// the results before creating the lifecycle policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation StartLifecyclePolicyPreview for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ErrCodeLifecyclePolicyNotFoundException "LifecyclePolicyNotFoundException" +// The lifecycle policy could not be found, and no policy is set to the repository. +// +// * ErrCodeLifecyclePolicyPreviewInProgressException "LifecyclePolicyPreviewInProgressException" +// The previous lifecycle policy preview request has not completed. Please try +// again later. 
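// Editor's aside (not part of the vendored SDK or of this patch): a minimal,
// hypothetical sketch of how the lifecycle-policy preview operations added in
// this hunk fit together, using the req/Send pattern shown in the doc comments
// above. The region and repository name are placeholders, and the
// StartLifecyclePolicyPreviewInput.RepositoryName field is assumed from the
// operation's documentation rather than taken from this diff.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	client := ecr.New(sess)

	// Kick off a dry run of the repository's current lifecycle policy; a preview
	// never expires any images.
	startReq, _ := client.StartLifecyclePolicyPreviewRequest(&ecr.StartLifecyclePolicyPreviewInput{
		RepositoryName: aws.String("my-repo"), // hypothetical repository
	})
	if err := startReq.Send(); err != nil {
		fmt.Println("start preview failed:", err)
		return
	}

	// Fetch the preview; Status reports the state of the preview run.
	getReq, out := client.GetLifecyclePolicyPreviewRequest(&ecr.GetLifecyclePolicyPreviewInput{
		RepositoryName: aws.String("my-repo"),
	})
	if err := getReq.Send(); err != nil {
		fmt.Println("get preview failed:", err)
		return
	}
	fmt.Println("preview status:", aws.StringValue(out.Status))
}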
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview +func (c *ECR) StartLifecyclePolicyPreview(input *StartLifecyclePolicyPreviewInput) (*StartLifecyclePolicyPreviewOutput, error) { + req, out := c.StartLifecyclePolicyPreviewRequest(input) + return out, req.Send() +} + +// StartLifecyclePolicyPreviewWithContext is the same as StartLifecyclePolicyPreview with the addition of +// the ability to pass a context and additional request options. +// +// See StartLifecyclePolicyPreview for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) StartLifecyclePolicyPreviewWithContext(ctx aws.Context, input *StartLifecyclePolicyPreviewInput, opts ...request.Option) (*StartLifecyclePolicyPreviewOutput, error) { + req, out := c.StartLifecyclePolicyPreviewRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadLayerPart = "UploadLayerPart" + +// UploadLayerPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadLayerPart operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadLayerPart for more information on using the UploadLayerPart +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadLayerPartRequest method. +// req, resp := client.UploadLayerPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart +func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request.Request, output *UploadLayerPartOutput) { + op := &request.Operation{ + Name: opUploadLayerPart, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UploadLayerPartInput{} + } + + output = &UploadLayerPartOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadLayerPart API operation for Amazon EC2 Container Registry. +// +// Uploads an image layer part to Amazon ECR. +// +// This operation is used by the Amazon ECR proxy, and it is not intended for +// general use by customers for pulling and pushing images. In most cases, you +// should use the docker CLI to pull, tag, and push images. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation UploadLayerPart for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. 
Review the available parameters for the +// API request. +// +// * ErrCodeInvalidLayerPartException "InvalidLayerPartException" +// The layer part size is not valid, or the first byte specified is not consecutive // to the last byte of a previous layer part upload. // // * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" @@ -2360,11 +2812,115 @@ func (s *CreateRepositoryOutput) SetRepository(v *Repository) *CreateRepositoryO return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicyRequest +type DeleteLifecyclePolicyInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that is associated with the repository policy + // to
 delete. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLifecyclePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLifecyclePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLifecyclePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLifecyclePolicyInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DeleteLifecyclePolicyInput) SetRegistryId(v string) *DeleteLifecyclePolicyInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DeleteLifecyclePolicyInput) SetRepositoryName(v string) *DeleteLifecyclePolicyInput { + s.RepositoryName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicyResponse +type DeleteLifecyclePolicyOutput struct { + _ struct{} `type:"structure"` + + // The time stamp of the last time that the lifecycle policy was run. + LastEvaluatedAt *time.Time `locationName:"lastEvaluatedAt" type:"timestamp" timestampFormat:"unix"` + + // The JSON repository policy text. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s DeleteLifecyclePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLifecyclePolicyOutput) GoString() string { + return s.String() +} + +// SetLastEvaluatedAt sets the LastEvaluatedAt field's value. +func (s *DeleteLifecyclePolicyOutput) SetLastEvaluatedAt(v time.Time) *DeleteLifecyclePolicyOutput { + s.LastEvaluatedAt = &v + return s +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *DeleteLifecyclePolicyOutput) SetLifecyclePolicyText(v string) *DeleteLifecyclePolicyOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DeleteLifecyclePolicyOutput) SetRegistryId(v string) *DeleteLifecyclePolicyOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DeleteLifecyclePolicyOutput) SetRepositoryName(v string) *DeleteLifecyclePolicyOutput { + s.RepositoryName = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryRequest type DeleteRepositoryInput struct { _ struct{} `type:"structure"` - // Force the deletion of the repository if it contains images. + // If a repository contains images, forces the deletion. 
Force *bool `locationName:"force" type:"boolean"` // The AWS account ID associated with the registry that contains the repository @@ -2990,33 +3546,33 @@ func (s *GetDownloadUrlForLayerOutput) SetLayerDigest(v string) *GetDownloadUrlF return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicyRequest -type GetRepositoryPolicyInput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyRequest +type GetLifecyclePolicyInput struct { _ struct{} `type:"structure"` // The AWS account ID associated with the registry that contains the repository. // If you do not specify a registry, the default registry is assumed. RegistryId *string `locationName:"registryId" type:"string"` - // The name of the repository whose policy you want to retrieve. + // The name of the repository with the policy to retrieve. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` } // String returns the string representation -func (s GetRepositoryPolicyInput) String() string { +func (s GetLifecyclePolicyInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetRepositoryPolicyInput) GoString() string { +func (s GetLifecyclePolicyInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetRepositoryPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetRepositoryPolicyInput"} +func (s *GetLifecyclePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetLifecyclePolicyInput"} if s.RepositoryName == nil { invalidParams.Add(request.NewErrParamRequired("RepositoryName")) } @@ -3031,23 +3587,26 @@ func (s *GetRepositoryPolicyInput) Validate() error { } // SetRegistryId sets the RegistryId field's value. -func (s *GetRepositoryPolicyInput) SetRegistryId(v string) *GetRepositoryPolicyInput { +func (s *GetLifecyclePolicyInput) SetRegistryId(v string) *GetLifecyclePolicyInput { s.RegistryId = &v return s } // SetRepositoryName sets the RepositoryName field's value. -func (s *GetRepositoryPolicyInput) SetRepositoryName(v string) *GetRepositoryPolicyInput { +func (s *GetLifecyclePolicyInput) SetRepositoryName(v string) *GetLifecyclePolicyInput { s.RepositoryName = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicyResponse -type GetRepositoryPolicyOutput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyResponse +type GetLifecyclePolicyOutput struct { _ struct{} `type:"structure"` - // The JSON repository policy text associated with the repository. - PolicyText *string `locationName:"policyText" type:"string"` + // The time stamp of the last time that the lifecycle policy was run. + LastEvaluatedAt *time.Time `locationName:"lastEvaluatedAt" type:"timestamp" timestampFormat:"unix"` + + // The JSON repository policy text. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` // The registry ID associated with the request. 
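// Editor's aside (not part of the vendored SDK or of this patch): a small,
// hypothetical sketch of reading a policy back with the GetLifecyclePolicy
// types defined in this hunk. The repository name is a placeholder and the
// client is assumed to be an already-configured *ecr.ECR.
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func showLifecyclePolicy(client *ecr.ECR) error {
	out, err := client.GetLifecyclePolicy(&ecr.GetLifecyclePolicyInput{
		RepositoryName: aws.String("my-repo"), // hypothetical
	})
	if err != nil {
		return err
	}
	// LifecyclePolicyText carries the policy JSON; LastEvaluatedAt may be nil
	// if the policy has not been evaluated yet.
	fmt.Println(aws.StringValue(out.LifecyclePolicyText))
	if out.LastEvaluatedAt != nil {
		fmt.Println("last evaluated:", out.LastEvaluatedAt.String())
	}
	return nil
}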
RegistryId *string `locationName:"registryId" type:"string"` @@ -3057,16 +3616,303 @@ type GetRepositoryPolicyOutput struct { } // String returns the string representation -func (s GetRepositoryPolicyOutput) String() string { +func (s GetLifecyclePolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetRepositoryPolicyOutput) GoString() string { +func (s GetLifecyclePolicyOutput) GoString() string { return s.String() } -// SetPolicyText sets the PolicyText field's value. +// SetLastEvaluatedAt sets the LastEvaluatedAt field's value. +func (s *GetLifecyclePolicyOutput) SetLastEvaluatedAt(v time.Time) *GetLifecyclePolicyOutput { + s.LastEvaluatedAt = &v + return s +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *GetLifecyclePolicyOutput) SetLifecyclePolicyText(v string) *GetLifecyclePolicyOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *GetLifecyclePolicyOutput) SetRegistryId(v string) *GetLifecyclePolicyOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetLifecyclePolicyOutput) SetRepositoryName(v string) *GetLifecyclePolicyOutput { + s.RepositoryName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreviewRequest +type GetLifecyclePolicyPreviewInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that filters results based on image tag status and + // all tags, if tagged. + Filter *LifecyclePolicyPreviewFilter `locationName:"filter" type:"structure"` + + // The list of imageIDs to be included. + ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"` + + // The maximum number of repository results returned by GetLifecyclePolicyPreviewRequest + // in
 paginated output. When this parameter is used, GetLifecyclePolicyPreviewRequest
 + // only returns maxResults results in a single page along with a nextToken
 + // response element. The remaining results of the initial request can be seen
 + // by sending another GetLifecyclePolicyPreviewRequest request with the returned
 + // nextToken value. This value can be between 1 and 100. If this parameter
 + // is not used, then GetLifecyclePolicyPreviewRequest returns up to 100 results
 + // and a nextToken value, if applicable.
 + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
 +
 + // The nextToken value returned from a previous paginated GetLifecyclePolicyPreviewRequest
 + // request where maxResults was used and the results exceeded the value of
 + // that parameter. Pagination continues from the end of the previous results
 + // that returned the nextToken value. This value is
 null when there are no + // more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository with the policy to retrieve. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetLifecyclePolicyPreviewInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLifecyclePolicyPreviewInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetLifecyclePolicyPreviewInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetLifecyclePolicyPreviewInput"} + if s.ImageIds != nil && len(s.ImageIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageIds", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *GetLifecyclePolicyPreviewInput) SetFilter(v *LifecyclePolicyPreviewFilter) *GetLifecyclePolicyPreviewInput { + s.Filter = v + return s +} + +// SetImageIds sets the ImageIds field's value. +func (s *GetLifecyclePolicyPreviewInput) SetImageIds(v []*ImageIdentifier) *GetLifecyclePolicyPreviewInput { + s.ImageIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetLifecyclePolicyPreviewInput) SetMaxResults(v int64) *GetLifecyclePolicyPreviewInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetLifecyclePolicyPreviewInput) SetNextToken(v string) *GetLifecyclePolicyPreviewInput { + s.NextToken = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *GetLifecyclePolicyPreviewInput) SetRegistryId(v string) *GetLifecyclePolicyPreviewInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetLifecyclePolicyPreviewInput) SetRepositoryName(v string) *GetLifecyclePolicyPreviewInput { + s.RepositoryName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreviewResponse +type GetLifecyclePolicyPreviewOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The nextToken value to include in a future GetLifecyclePolicyPreview request. + // When the results of a GetLifecyclePolicyPreview request exceed maxResults, + // this value can be used to retrieve the next page of results. This value is + // null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The results of the lifecycle policy preview request. 
+ PreviewResults []*LifecyclePolicyPreviewResult `locationName:"previewResults" type:"list"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + + // The status of the lifecycle policy preview request. + Status *string `locationName:"status" type:"string" enum:"LifecyclePolicyPreviewStatus"` + + // The list of images that is returned as a result of the action. + Summary *LifecyclePolicyPreviewSummary `locationName:"summary" type:"structure"` +} + +// String returns the string representation +func (s GetLifecyclePolicyPreviewOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLifecyclePolicyPreviewOutput) GoString() string { + return s.String() +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetLifecyclePolicyText(v string) *GetLifecyclePolicyPreviewOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetNextToken(v string) *GetLifecyclePolicyPreviewOutput { + s.NextToken = &v + return s +} + +// SetPreviewResults sets the PreviewResults field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetPreviewResults(v []*LifecyclePolicyPreviewResult) *GetLifecyclePolicyPreviewOutput { + s.PreviewResults = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetRegistryId(v string) *GetLifecyclePolicyPreviewOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetRepositoryName(v string) *GetLifecyclePolicyPreviewOutput { + s.RepositoryName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetStatus(v string) *GetLifecyclePolicyPreviewOutput { + s.Status = &v + return s +} + +// SetSummary sets the Summary field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetSummary(v *LifecyclePolicyPreviewSummary) *GetLifecyclePolicyPreviewOutput { + s.Summary = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicyRequest +type GetRepositoryPolicyInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository with the policy to retrieve. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRepositoryPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRepositoryPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
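// Editor's aside (not part of the vendored SDK or of this patch): a hypothetical
// sketch of paging through GetLifecyclePolicyPreview results with the
// maxResults/nextToken fields documented above. The repository name is a
// placeholder; the field and method names come from this hunk.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func allPreviewResults(client *ecr.ECR, repo string) ([]*ecr.LifecyclePolicyPreviewResult, error) {
	input := &ecr.GetLifecyclePolicyPreviewInput{
		RepositoryName: aws.String(repo),
		MaxResults:     aws.Int64(100), // 1-100 per page, per the comment above
	}
	var results []*ecr.LifecyclePolicyPreviewResult
	for {
		out, err := client.GetLifecyclePolicyPreview(input)
		if err != nil {
			return nil, err
		}
		results = append(results, out.PreviewResults...)
		if out.NextToken == nil {
			return results, nil // a nil nextToken means there are no more pages
		}
		input.NextToken = out.NextToken
	}
}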
+func (s *GetRepositoryPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRepositoryPolicyInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegistryId sets the RegistryId field's value. +func (s *GetRepositoryPolicyInput) SetRegistryId(v string) *GetRepositoryPolicyInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetRepositoryPolicyInput) SetRepositoryName(v string) *GetRepositoryPolicyInput { + s.RepositoryName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicyResponse +type GetRepositoryPolicyOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text associated with the repository. + PolicyText *string `locationName:"policyText" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s GetRepositoryPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRepositoryPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicyText sets the PolicyText field's value. func (s *GetRepositoryPolicyOutput) SetPolicyText(v string) *GetRepositoryPolicyOutput { s.PolicyText = &v return s @@ -3293,11 +4139,11 @@ func (s *ImageIdentifier) SetImageTag(v string) *ImageIdentifier { type InitiateLayerUploadInput struct { _ struct{} `type:"structure"` - // The AWS account ID associated with the registry that you intend to upload - // layers to. If you do not specify a registry, the default registry is assumed. + // The AWS account ID associated with the registry to which you intend to upload + // layers. If you do not specify a registry, the default registry is assumed. RegistryId *string `locationName:"registryId" type:"string"` - // The name of the repository that you intend to upload layers to. + // The name of the repository to which you intend to upload layers. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` @@ -3472,6 +4318,143 @@ func (s *LayerFailure) SetLayerDigest(v string) *LayerFailure { return s } +// The filter for the lifecycle policy preview. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/LifecyclePolicyPreviewFilter +type LifecyclePolicyPreviewFilter struct { + _ struct{} `type:"structure"` + + // The tag status of the image. + TagStatus *string `locationName:"tagStatus" type:"string" enum:"TagStatus"` +} + +// String returns the string representation +func (s LifecyclePolicyPreviewFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecyclePolicyPreviewFilter) GoString() string { + return s.String() +} + +// SetTagStatus sets the TagStatus field's value. 
+func (s *LifecyclePolicyPreviewFilter) SetTagStatus(v string) *LifecyclePolicyPreviewFilter { + s.TagStatus = &v + return s +} + +// The result of the lifecycle policy preview. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/LifecyclePolicyPreviewResult +type LifecyclePolicyPreviewResult struct { + _ struct{} `type:"structure"` + + // The type of action to be taken. + Action *LifecyclePolicyRuleAction `locationName:"action" type:"structure"` + + // The priority of the applied rule. + AppliedRulePriority *int64 `locationName:"appliedRulePriority" min:"1" type:"integer"` + + // The sha256 digest of the image manifest. + ImageDigest *string `locationName:"imageDigest" type:"string"` + + // The date and time, expressed in standard JavaScript date format, at which + // the current image was pushed to the repository. + ImagePushedAt *time.Time `locationName:"imagePushedAt" type:"timestamp" timestampFormat:"unix"` + + // The list of tags associated with this image. + ImageTags []*string `locationName:"imageTags" type:"list"` +} + +// String returns the string representation +func (s LifecyclePolicyPreviewResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecyclePolicyPreviewResult) GoString() string { + return s.String() +} + +// SetAction sets the Action field's value. +func (s *LifecyclePolicyPreviewResult) SetAction(v *LifecyclePolicyRuleAction) *LifecyclePolicyPreviewResult { + s.Action = v + return s +} + +// SetAppliedRulePriority sets the AppliedRulePriority field's value. +func (s *LifecyclePolicyPreviewResult) SetAppliedRulePriority(v int64) *LifecyclePolicyPreviewResult { + s.AppliedRulePriority = &v + return s +} + +// SetImageDigest sets the ImageDigest field's value. +func (s *LifecyclePolicyPreviewResult) SetImageDigest(v string) *LifecyclePolicyPreviewResult { + s.ImageDigest = &v + return s +} + +// SetImagePushedAt sets the ImagePushedAt field's value. +func (s *LifecyclePolicyPreviewResult) SetImagePushedAt(v time.Time) *LifecyclePolicyPreviewResult { + s.ImagePushedAt = &v + return s +} + +// SetImageTags sets the ImageTags field's value. +func (s *LifecyclePolicyPreviewResult) SetImageTags(v []*string) *LifecyclePolicyPreviewResult { + s.ImageTags = v + return s +} + +// The summary of the lifecycle policy preview request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/LifecyclePolicyPreviewSummary +type LifecyclePolicyPreviewSummary struct { + _ struct{} `type:"structure"` + + // The number of expiring images. + ExpiringImageTotalCount *int64 `locationName:"expiringImageTotalCount" type:"integer"` +} + +// String returns the string representation +func (s LifecyclePolicyPreviewSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecyclePolicyPreviewSummary) GoString() string { + return s.String() +} + +// SetExpiringImageTotalCount sets the ExpiringImageTotalCount field's value. +func (s *LifecyclePolicyPreviewSummary) SetExpiringImageTotalCount(v int64) *LifecyclePolicyPreviewSummary { + s.ExpiringImageTotalCount = &v + return s +} + +// The type of action to be taken. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/LifecyclePolicyRuleAction +type LifecyclePolicyRuleAction struct { + _ struct{} `type:"structure"` + + // The type of action to be taken. 
+ Type *string `locationName:"type" type:"string" enum:"ImageActionType"` +} + +// String returns the string representation +func (s LifecyclePolicyRuleAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecyclePolicyRuleAction) GoString() string { + return s.String() +} + +// SetType sets the Type field's value. +func (s *LifecyclePolicyRuleAction) SetType(v string) *LifecyclePolicyRuleAction { + s.Type = &v + return s +} + // An object representing a filter on a ListImages operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImagesFilter type ListImagesFilter struct { @@ -3524,11 +4507,11 @@ type ListImagesInput struct { NextToken *string `locationName:"nextToken" type:"string"` // The AWS account ID associated with the registry that contains the repository - // to list images in. If you do not specify a registry, the default registry + // in which to list images. If you do not specify a registry, the default registry // is assumed. RegistryId *string `locationName:"registryId" type:"string"` - // The repository whose image IDs are to be listed. + // The repository with image IDs to be listed. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` @@ -3730,28 +4713,138 @@ func (s *PutImageOutput) SetImage(v *Image) *PutImageOutput { return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicyRequest +type PutLifecyclePolicyInput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text to apply to the repository. + // + // LifecyclePolicyText is a required field + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string" required:"true"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do
 not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to receive the policy. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutLifecyclePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLifecyclePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutLifecyclePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutLifecyclePolicyInput"} + if s.LifecyclePolicyText == nil { + invalidParams.Add(request.NewErrParamRequired("LifecyclePolicyText")) + } + if s.LifecyclePolicyText != nil && len(*s.LifecyclePolicyText) < 100 { + invalidParams.Add(request.NewErrParamMinLen("LifecyclePolicyText", 100)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *PutLifecyclePolicyInput) SetLifecyclePolicyText(v string) *PutLifecyclePolicyInput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *PutLifecyclePolicyInput) SetRegistryId(v string) *PutLifecyclePolicyInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *PutLifecyclePolicyInput) SetRepositoryName(v string) *PutLifecyclePolicyInput { + s.RepositoryName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicyResponse +type PutLifecyclePolicyOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s PutLifecyclePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLifecyclePolicyOutput) GoString() string { + return s.String() +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *PutLifecyclePolicyOutput) SetLifecyclePolicyText(v string) *PutLifecyclePolicyOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *PutLifecyclePolicyOutput) SetRegistryId(v string) *PutLifecyclePolicyOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *PutLifecyclePolicyOutput) SetRepositoryName(v string) *PutLifecyclePolicyOutput { + s.RepositoryName = &v + return s +} + // An object representing a repository. 
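// Editor's note: an illustrative sketch only, not part of the vendored SDK or of
// this patch. With the lifecycle-policy types above, applying a policy and then
// dry-running it could look roughly like the following, assuming an existing
// *ecr.ECR client named svc and a JSON policy document in policyText (at least
// 100 characters, per the min constraint above); both names are hypothetical:
//
//	if _, err := svc.PutLifecyclePolicy(&ecr.PutLifecyclePolicyInput{
//		RepositoryName:      aws.String("example-repo"),
//		LifecyclePolicyText: aws.String(policyText),
//	}); err != nil {
//		return err
//	}
//	// Optionally preview what the policy would do before relying on it:
//	_, err := svc.StartLifecyclePolicyPreview(&ecr.StartLifecyclePolicyPreviewInput{
//		RepositoryName: aws.String("example-repo"),
//	})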
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/Repository type Repository struct { _ struct{} `type:"structure"` - // The date and time, in JavaScript date/time format, when the repository was - // created. + // The date and time, in JavaScript date format, when the repository was created. CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` // The AWS account ID associated with the registry that contains the repository. RegistryId *string `locationName:"registryId" type:"string"` // The Amazon Resource Name (ARN) that identifies the repository. The ARN contains - // the arn:aws:ecr namespace, followed by the region of the repository, the - // AWS account ID of the repository owner, the repository namespace, and then - // the repository name. For example, arn:aws:ecr:region:012345678910:repository/test. + // the arn:aws:ecr namespace, followed by the region of the repository, AWS + // account ID of the repository owner, repository namespace, and repository + // name. For example, arn:aws:ecr:region:012345678910:repository/test. RepositoryArn *string `locationName:"repositoryArn" type:"string"` // The name of the repository. RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` - // The URI for the repository. You can use this URI for Docker push and pull + // The URI for the repository. You can use this URI for Docker push or pull // operations. RepositoryUri *string `locationName:"repositoryUri" type:"string"` } @@ -3915,6 +5008,122 @@ func (s *SetRepositoryPolicyOutput) SetRepositoryName(v string) *SetRepositoryPo return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreviewRequest +type StartLifecyclePolicyPreviewInput struct { + _ struct{} `type:"structure"` + + // The policy to be evaluated against. If you do not specify a policy, the current + // policy for the repository is used. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to be evaluated. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartLifecyclePolicyPreviewInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartLifecyclePolicyPreviewInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartLifecyclePolicyPreviewInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartLifecyclePolicyPreviewInput"} + if s.LifecyclePolicyText != nil && len(*s.LifecyclePolicyText) < 100 { + invalidParams.Add(request.NewErrParamMinLen("LifecyclePolicyText", 100)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. 
+func (s *StartLifecyclePolicyPreviewInput) SetLifecyclePolicyText(v string) *StartLifecyclePolicyPreviewInput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *StartLifecyclePolicyPreviewInput) SetRegistryId(v string) *StartLifecyclePolicyPreviewInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *StartLifecyclePolicyPreviewInput) SetRepositoryName(v string) *StartLifecyclePolicyPreviewInput { + s.RepositoryName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreviewResponse +type StartLifecyclePolicyPreviewOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + + // The status of the lifecycle policy preview request. + Status *string `locationName:"status" type:"string" enum:"LifecyclePolicyPreviewStatus"` +} + +// String returns the string representation +func (s StartLifecyclePolicyPreviewOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartLifecyclePolicyPreviewOutput) GoString() string { + return s.String() +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *StartLifecyclePolicyPreviewOutput) SetLifecyclePolicyText(v string) *StartLifecyclePolicyPreviewOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *StartLifecyclePolicyPreviewOutput) SetRegistryId(v string) *StartLifecyclePolicyPreviewOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *StartLifecyclePolicyPreviewOutput) SetRepositoryName(v string) *StartLifecyclePolicyPreviewOutput { + s.RepositoryName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *StartLifecyclePolicyPreviewOutput) SetStatus(v string) *StartLifecyclePolicyPreviewOutput { + s.Status = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPartRequest type UploadLayerPartInput struct { _ struct{} `type:"structure"` @@ -3936,11 +5145,11 @@ type UploadLayerPartInput struct { // PartLastByte is a required field PartLastByte *int64 `locationName:"partLastByte" type:"long" required:"true"` - // The AWS account ID associated with the registry that you are uploading layer - // parts to. If you do not specify a registry, the default registry is assumed. + // The AWS account ID associated with the registry to which you are uploading + // layer parts. If you do not specify a registry, the default registry is assumed. RegistryId *string `locationName:"registryId" type:"string"` - // The name of the repository that you are uploading layer parts to. + // The name of the repository to which you are uploading layer parts. 
// // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` @@ -4077,6 +5286,11 @@ func (s *UploadLayerPartOutput) SetUploadId(v string) *UploadLayerPartOutput { return s } +const ( + // ImageActionTypeExpire is a ImageActionType enum value + ImageActionTypeExpire = "EXPIRE" +) + const ( // ImageFailureCodeInvalidImageDigest is a ImageFailureCode enum value ImageFailureCodeInvalidImageDigest = "InvalidImageDigest" @@ -4110,6 +5324,20 @@ const ( LayerFailureCodeMissingLayerDigest = "MissingLayerDigest" ) +const ( + // LifecyclePolicyPreviewStatusInProgress is a LifecyclePolicyPreviewStatus enum value + LifecyclePolicyPreviewStatusInProgress = "IN_PROGRESS" + + // LifecyclePolicyPreviewStatusComplete is a LifecyclePolicyPreviewStatus enum value + LifecyclePolicyPreviewStatusComplete = "COMPLETE" + + // LifecyclePolicyPreviewStatusExpired is a LifecyclePolicyPreviewStatus enum value + LifecyclePolicyPreviewStatusExpired = "EXPIRED" + + // LifecyclePolicyPreviewStatusFailed is a LifecyclePolicyPreviewStatus enum value + LifecyclePolicyPreviewStatusFailed = "FAILED" +) + const ( // TagStatusTagged is a TagStatus enum value TagStatusTagged = "TAGGED" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go index 987aa72fa68..c2dc9e8a87d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go @@ -3,11 +3,11 @@ // Package ecr provides the client and types for making API // requests to Amazon EC2 Container Registry. // -// Amazon EC2 Container Registry (Amazon ECR) is a managed AWS Docker registry -// service. Customers can use the familiar Docker CLI to push, pull, and manage -// images. Amazon ECR provides a secure, scalable, and reliable registry. Amazon -// ECR supports private Docker repositories with resource-based permissions -// using AWS IAM so that specific users or Amazon EC2 instances can access repositories +// Amazon EC2 Container Registry (Amazon ECR) is a managed Docker registry service. +// Customers can use the familiar Docker CLI to push, pull, and manage images. +// Amazon ECR provides a secure, scalable, and reliable registry. Amazon ECR +// supports private Docker repositories with resource-based permissions using +// IAM so that specific users or Amazon EC2 instances can access repositories // and images. Developers can use the Docker CLI to author and manage images. // // See https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21 for more information on this service. diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go index 4399e6a2957..2a2caaeddc4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go @@ -13,8 +13,8 @@ const ( // ErrCodeImageAlreadyExistsException for service response error code // "ImageAlreadyExistsException". // - // The specified image has already been pushed, and there are no changes to - // the manifest or image tag since the last push. + // The specified image has already been pushed, and there were no changes to + // the manifest or image tag after the last push. ErrCodeImageAlreadyExistsException = "ImageAlreadyExistsException" // ErrCodeImageNotFoundException for service response error code @@ -70,6 +70,25 @@ const ( // for this repository. 
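// Editor's note: an illustrative sketch only, not part of the vendored SDK or of
// this patch. Callers can poll a started preview and distinguish the new
// lifecycle-policy error codes declared below via awserr.Error; svc and repo are
// hypothetical:
//
//	out, err := svc.GetLifecyclePolicyPreview(&ecr.GetLifecyclePolicyPreviewInput{
//		RepositoryName: aws.String(repo),
//	})
//	if aerr, ok := err.(awserr.Error); ok &&
//		aerr.Code() == ecr.ErrCodeLifecyclePolicyPreviewNotFoundException {
//		// no preview has been started for this repository
//	} else if err == nil && aws.StringValue(out.Status) == ecr.LifecyclePolicyPreviewStatusComplete {
//		// out.PreviewResults now lists the images the policy would expire
//	}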
ErrCodeLayersNotFoundException = "LayersNotFoundException" + // ErrCodeLifecyclePolicyNotFoundException for service response error code + // "LifecyclePolicyNotFoundException". + // + // The lifecycle policy could not be found, and no policy is set to the repository. + ErrCodeLifecyclePolicyNotFoundException = "LifecyclePolicyNotFoundException" + + // ErrCodeLifecyclePolicyPreviewInProgressException for service response error code + // "LifecyclePolicyPreviewInProgressException". + // + // The previous lifecycle policy preview request has not completed. Please try + // again later. + ErrCodeLifecyclePolicyPreviewInProgressException = "LifecyclePolicyPreviewInProgressException" + + // ErrCodeLifecyclePolicyPreviewNotFoundException for service response error code + // "LifecyclePolicyPreviewNotFoundException". + // + // There is no dry run for this repository. + ErrCodeLifecyclePolicyPreviewNotFoundException = "LifecyclePolicyPreviewNotFoundException" + // ErrCodeLimitExceededException for service response error code // "LimitExceededException". // diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations_test.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations_test.go index 6f870d35e27..18972b1f564 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations_test.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations_test.go @@ -3,8 +3,6 @@ package sts_test import ( "testing" - "github.com/stretchr/testify/assert" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/awstesting/unit" "github.com/aws/aws-sdk-go/service/sts" @@ -22,8 +20,12 @@ func TestUnsignedRequest_AssumeRoleWithSAML(t *testing.T) { }) err := req.Sign() - assert.NoError(t, err) - assert.Equal(t, "", req.HTTPRequest.Header.Get("Authorization")) + if err != nil { + t.Errorf("expect no error, got %v", err) + } + if e, a := "", req.HTTPRequest.Header.Get("Authorization"); e != a { + t.Errorf("expect %v, got %v", e, a) + } } func TestUnsignedRequest_AssumeRoleWithWebIdentity(t *testing.T) { @@ -34,6 +36,10 @@ func TestUnsignedRequest_AssumeRoleWithWebIdentity(t *testing.T) { }) err := req.Sign() - assert.NoError(t, err) - assert.Equal(t, "", req.HTTPRequest.Header.Get("Authorization")) + if err != nil { + t.Errorf("expect no error, got %v", err) + } + if e, a := "", req.HTTPRequest.Header.Get("Authorization"); e != a { + t.Errorf("expect %v, got %v", e, a) + } } diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap index f0452da6194..d9910601987 100644 --- a/vendor/github.com/docker/distribution/.mailmap +++ b/vendor/github.com/docker/distribution/.mailmap @@ -13,4 +13,6 @@ Sharif Nassar Sharif Nassar Sven Dowideit Vincent Giersch Vincent Giersch davidli davidli -Omer Cohen Omer Cohen \ No newline at end of file +Omer Cohen Omer Cohen +Eric Yang Eric Yang +Nikita Tarasov Nikita diff --git a/vendor/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS index 0857b62fc97..252ff8aa2a6 100644 --- a/vendor/github.com/docker/distribution/AUTHORS +++ b/vendor/github.com/docker/distribution/AUTHORS @@ -1,32 +1,47 @@ +a-palchikov Aaron Lehmann +Aaron Schlesinger Aaron Vinson +Adam Duke Adam Enger Adrian Mouat Ahmet Alp Balkan Alex Chan Alex Elman +Alexey Gladkov +allencloud amitshukla Amy Lindburg +Andrew Hsu Andrew Meredith Andrew T Nguyen Andrey Kostov Andy Goldstein +Anis Elleuch Anton Tiurin Antonio Mercado Antonio Murdaca +Anusha Ragunathan +Arien Holthuizen Arnaud Porterie Arthur Baars 
Asuka Suzuki Avi Miller Ayose Cazorla BadZen +Ben Bodenmiller Ben Firshman bin liu Brian Bland burnettk Carson A +Cezar Sa Espinola +Charles Smith Chris Dillon +cuiwei13 +cyli Daisuke Fujita +Daniel Huhn Darren Shepherd Dave Trombley Dave Tucker @@ -40,12 +55,17 @@ Diogo Mónica DJ Enriquez Donald Huang Doug Davis +Edgar Lee Eric Yang +Fabio Berchtold +Fabio Huser farmerworking Felix Yan Florentin Raud +Frank Chen Frederick F. Kautz IV gabriell nascimento +Gleb Schukin harche Henri Gomez Hu Keping @@ -54,16 +74,26 @@ HuKeping Ian Babrou igayoso Jack Griffin +James Findley Jason Freidman +Jason Heiss Jeff Nickoloff +Jess Frazelle Jessie Frazelle +jhaohai Jianqing Wang +Jihoon Chung +Joao Fernandes +John Mulhausen John Starks +Jon Johnson Jon Poler Jonathan Boulle Jordan Liggitt +Josh Chorlton Josh Hawn Julien Fernandez +Ke Xu Keerthan Mala Kelsey Hightower Kenneth Lim @@ -71,38 +101,56 @@ Kenny Leung Li Yi Liu Hua liuchang0812 +Lloyd Ramey Louis Kottmann Luke Carpenter +Marcus Martins Mary Anthony Matt Bentley Matt Duch Matt Moore Matt Robenolt +Matthew Green Michael Prokop Michal Minar +Michal Minář +Mike Brown Miquel Sabaté +Misty Stanley-Jones +Misty Stanley-Jones Morgan Bauer moxiegirl Nathan Sullivan nevermosby Nghia Tran +Nikita Tarasov +Noah Treuhaft Nuutti Kotivuori Oilbeater Olivier Gambier Olivier Jacques Omer Cohen Patrick Devine +Phil Estes Philip Misiowiec +Pierre-Yves Ritschard +Qiao Anran +Randy Barlow Richard Scothern Rodolfo Carvalho Rusty Conover Sean Boran Sebastiaan van Stijn +Sebastien Coavoux +Serge Dubrouski Sharif Nassar Shawn Falkner-Horine Shreyas Karnik Simon Thulbourn +spacexnice Spencer Rinehart +Stan Hu +Stefan Majewsky Stefan Weil Stephen J Day Sungho Moon @@ -114,8 +162,11 @@ Thomas Sjögren Tianon Gravi Tibor Vass Tonis Tiigi +Tony Holdstock-Brown Trevor Pounds Troels Thomsen +Victor Vieux +Victoria Bialas Vincent Batts Vincent Demeester Vincent Giersch @@ -124,5 +175,8 @@ weiyuan.yl xg.song xiekeyang Yann ROBERT +yaoyao.xyy +yuexiao-wang yuzou +zhouhaibing089 姜继忠 diff --git a/vendor/github.com/docker/distribution/BUILDING.md b/vendor/github.com/docker/distribution/BUILDING.md new file mode 100644 index 00000000000..c59828182a2 --- /dev/null +++ b/vendor/github.com/docker/distribution/BUILDING.md @@ -0,0 +1,117 @@ + +# Building the registry source + +## Use-case + +This is useful if you intend to actively work on the registry. + +### Alternatives + +Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). + +People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. + +OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md). + +### Gotchas + +You are expected to know your way around with go & git. + +If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. + +## Build the development environment + +The first prerequisite of properly building distribution targets is to have a Go +development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) +for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the +environment. 
+ +If a Go development environment is setup, one can use `go get` to install the +`registry` command from the current latest: + + go get github.com/docker/distribution/cmd/registry + +The above will install the source repository into the `GOPATH`. + +Now create the directory for the registry data (this might require you to set permissions properly) + + mkdir -p /var/lib/registry + +... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. + +The `registry` +binary can then be run with the following: + + $ $GOPATH/bin/registry --version + $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown + +> __NOTE:__ While you do not need to use `go get` to checkout the distribution +> project, for these build instructions to work, the project must be checked +> out in the correct location in the `GOPATH`. This should almost always be +> `$GOPATH/src/github.com/docker/distribution`. + +The registry can be run with the default config using the following +incantation: + + $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml + INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] debug server listening localhost:5001 + +If it is working, one should see the above log messages. + +### Repeatable Builds + +For the full development experience, one should `cd` into +`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` +commands, such as `go test`, should work per package (please see +[Developing](#developing) if they don't work). + +A `Makefile` has been provided as a convenience to support repeatable builds. +Please install the following into `GOPATH` for it to work: + + go get github.com/golang/lint/golint + +Once these commands are available in the `GOPATH`, run `make` to get a full +build: + + $ make + + clean + + fmt + + vet + + lint + + build + github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar + github.com/sirupsen/logrus + github.com/docker/libtrust + ... + github.com/yvasiyarov/gorelic + github.com/docker/distribution/registry/handlers + github.com/docker/distribution/cmd/registry + + test + ... + ok github.com/docker/distribution/digest 7.875s + ok github.com/docker/distribution/manifest 0.028s + ok github.com/docker/distribution/notifications 17.322s + ? github.com/docker/distribution/registry [no test files] + ok github.com/docker/distribution/registry/api/v2 0.101s + ? github.com/docker/distribution/registry/auth [no test files] + ok github.com/docker/distribution/registry/auth/silly 0.011s + ... + + /Users/sday/go/src/github.com/docker/distribution/bin/registry + + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template + + binaries + +The above provides a repeatable build using the contents of the vendor +directory. This includes formatting, vetting, linting, building, +testing and generating tagged binaries. 
We can verify this worked by running +the registry binary generated in the "./bin" directory: + + $ ./bin/registry -version + ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m + +### Optional build tags + +Optional [build tags](http://golang.org/pkg/go/build/) can be provided using +the environment variable `DOCKER_BUILDTAGS`. diff --git a/vendor/github.com/docker/distribution/CHANGELOG.md b/vendor/github.com/docker/distribution/CHANGELOG.md new file mode 100644 index 00000000000..e7b16b3c258 --- /dev/null +++ b/vendor/github.com/docker/distribution/CHANGELOG.md @@ -0,0 +1,108 @@ +# Changelog + +## 2.6.0 (2017-01-18) + +#### Storage +- S3: fixed bug in delete due to read-after-write inconsistency +- S3: allow EC2 IAM roles to be used when authorizing region endpoints +- S3: add Object ACL Support +- S3: fix delete method's notion of subpaths +- S3: use multipart upload API in `Move` method for performance +- S3: add v2 signature signing for legacy S3 clones +- Swift: add simple heuristic to detect incomplete DLOs during read ops +- Swift: support different user and tenant domains +- Swift: bulk deletes in chunks +- Aliyun OSS: fix delete method's notion of subpaths +- Aliyun OSS: optimize data copy after upload finishes +- Azure: close leaking response body +- Fix storage drivers dropping non-EOF errors when listing repositories +- Compare path properly when listing repositories in catalog +- Add a foreign layer URL host whitelist +- Improve catalog enumerate runtime + +#### Registry +- Export `storage.CreateOptions` in top-level package +- Enable notifications to endpoints that use self-signed certificates +- Properly validate multi-URL foreign layers +- Add control over validation of URLs in pushed manifests +- Proxy mode: fix socket leak when pull is cancelled +- Tag service: properly handle error responses on HEAD request +- Support for custom authentication URL in proxying registry +- Add configuration option to disable access logging +- Add notification filtering by target media type +- Manifest: `References()` returns all children +- Honor `X-Forwarded-Port` and Forwarded headers +- Reference: Preserve tag and digest in With* functions +- Add policy configuration for enforcing repository classes + +#### Client +- Changes the client Tags `All()` method to follow links +- Allow registry clients to connect via HTTP2 +- Better handling of OAuth errors in client + +#### Spec +- Manifest: clarify relationship between urls and foreign layers +- Authorization: add support for repository classes + +#### Manifest +- Override media type returned from `Stat()` for existing manifests +- Add plugin mediatype to distribution manifest + +#### Docs +- Document `TOOMANYREQUESTS` error code +- Document required Let's Encrypt port +- Improve documentation around implementation of OAuth2 +- Improve documentation for configuration + +#### Auth +- Add support for registry type in scope +- Add support for using v2 ping challenges for v1 +- Add leeway to JWT `nbf` and `exp` checking +- htpasswd: dynamically parse htpasswd file +- Fix missing auth headers with PATCH HTTP request when pushing to default port + +#### Dockerfile +- Update to go1.7 +- Reorder Dockerfile steps for better layer caching + +#### Notes + +Documentation has moved to the documentation repository at +`github.com/docker/docker.github.io/tree/master/registry` + +The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and `vet` ing. 
+ + +## 2.5.0 (2016-06-14) + +#### Storage +- Ensure uploads directory is cleaned after upload is committed +- Add ability to cap concurrent operations in filesystem driver +- S3: Add 'us-gov-west-1' to the valid region list +- Swift: Handle ceph not returning Last-Modified header for HEAD requests +- Add redirect middleware + +#### Registry +- Add support for blobAccessController middleware +- Add support for layers from foreign sources +- Remove signature store +- Add support for Let's Encrypt +- Correct yaml key names in configuration + +#### Client +- Add option to get content digest from manifest get + +#### Spec +- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported +- Clarify API documentation around catalog fetch behavior + +#### API +- Support returning HTTP 429 (Too Many Requests) + +#### Documentation +- Update auth documentation examples to show "expires in" as int + +#### Docker Image +- Use Alpine Linux as base image + + diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md index 7cc7aedffeb..1f1441beec3 100644 --- a/vendor/github.com/docker/distribution/CONTRIBUTING.md +++ b/vendor/github.com/docker/distribution/CONTRIBUTING.md @@ -21,6 +21,14 @@ Then please do not open an issue here yet - you should first try one of the foll - irc: #docker-distribution on freenode - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution +### Reporting security issues + +The Docker maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[security@docker.com](mailto:security@docker.com). + ## Reporting an issue properly By following these simple rules you will get better and faster feedback on your issue. diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile index abb3e3bbf84..ac8dbca2fbc 100644 --- a/vendor/github.com/docker/distribution/Dockerfile +++ b/vendor/github.com/docker/distribution/Dockerfile @@ -1,15 +1,18 @@ -FROM golang:1.6 - -RUN apt-get update && \ - apt-get install -y apache2-utils && \ - rm -rf /var/lib/apt/lists/* +FROM golang:1.8-alpine ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution ENV DOCKER_BUILDTAGS include_oss include_gcs +ARG GOOS=linux +ARG GOARCH=amd64 + +RUN set -ex \ + && apk add --no-cache make git + WORKDIR $DISTRIBUTION_DIR COPY . 
$DISTRIBUTION_DIR COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml + RUN make PREFIX=/go clean binaries VOLUME ["/var/lib/registry"] diff --git a/vendor/github.com/docker/distribution/MAINTAINERS b/vendor/github.com/docker/distribution/MAINTAINERS index 97f415dbfbc..bda400150c9 100644 --- a/vendor/github.com/docker/distribution/MAINTAINERS +++ b/vendor/github.com/docker/distribution/MAINTAINERS @@ -32,11 +32,6 @@ Email = "aaron.lehmann@docker.com" GitHub = "aaronlehmann" - [people.brianbland] - Name = "Brian Bland" - Email = "brian.bland@docker.com" - GitHub = "BrianBland" - [people.dmcgowan] Name = "Derek McGowan" Email = "derek@mcgstyle.net" diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile index a0602d0b2c0..7c6f9c7a6c5 100644 --- a/vendor/github.com/docker/distribution/Makefile +++ b/vendor/github.com/docker/distribution/Makefile @@ -13,7 +13,7 @@ endif GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" -.PHONY: clean all fmt vet lint build test binaries +.PHONY: all build binaries clean dep-restore dep-save dep-validate fmt lint test test-full vet .DEFAULT: all all: fmt vet lint build test binaries @@ -27,22 +27,25 @@ version/version.go: # Required for go 1.5 to build GO15VENDOREXPERIMENT := 1 +# Go files +GOFILES=$(shell find . -type f -name '*.go') + # Package list -PKGS := $(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/) +PKGS=$(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/) # Resolving binary dependencies for specific targets -GOLINT := $(shell which golint || echo '') -GODEP := $(shell which godep || echo '') +GOLINT=$(shell which golint || echo '') +VNDR=$(shell which vndr || echo '') -${PREFIX}/bin/registry: $(wildcard **/*.go) +${PREFIX}/bin/registry: $(GOFILES) @echo "+ $@" @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry -${PREFIX}/bin/digest: $(wildcard **/*.go) +${PREFIX}/bin/digest: $(GOFILES) @echo "+ $@" @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest -${PREFIX}/bin/registry-api-descriptor-template: $(wildcard **/*.go) +${PREFIX}/bin/registry-api-descriptor-template: $(GOFILES) @echo "+ $@" @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template @@ -83,24 +86,14 @@ clean: @echo "+ $@" @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template" -dep-save: - @echo "+ $@" - $(if $(GODEP), , \ - $(error Please install godep: go get github.com/tools/godep)) - @$(GODEP) save $(PKGS) - -dep-restore: - @echo "+ $@" - $(if $(GODEP), , \ - $(error Please install godep: go get github.com/tools/godep)) - @$(GODEP) restore -v - -dep-validate: dep-restore +dep-validate: @echo "+ $@" + $(if $(VNDR), , \ + $(error Please install vndr: go get github.com/lk4d4/vndr)) @rm -Rf .vendor.bak @mv vendor .vendor.bak - @rm -Rf Godeps - @$(GODEP) save ./... + @$(VNDR) @test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \ - (echo >&2 "+ borked dependencies! what you have in Godeps/Godeps.json does not match with what you have in vendor" && false) - @rm -Rf .vendor.bak + (echo >&2 "+ inconsistent dependencies! 
what you have in vendor.conf does not match with what you have in vendor" && false) + @rm -Rf vendor + @mv .vendor.bak vendor diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md index c21d47243ae..998878850cd 100644 --- a/vendor/github.com/docker/distribution/README.md +++ b/vendor/github.com/docker/distribution/README.md @@ -19,7 +19,7 @@ This repository contains the following components: | **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | | **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | | **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | -| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. | +| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. | ### How does this integrate with Docker engine? @@ -60,15 +60,15 @@ For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md). By default, Docker users pull images from Docker's public registry instance. [Installing Docker](https://docs.docker.com/engine/installation/) gives users this ability. Users can also push images to a repository on Docker's public registry, -if they have a [Docker Hub](https://hub.docker.com/) account. +if they have a [Docker Hub](https://hub.docker.com/) account. For some users and even companies, this default behavior is sufficient. For -others, it is not. +others, it is not. For example, users with their own software products may want to maintain a registry for private, company images. Also, you may wish to deploy your own image repository for images used to test or in continuous integration. For these -use cases and others, [deploying your own registry instance](docs/deploying.md) +use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md) may be the better choice. ### Migration to Registry 2.0 @@ -76,14 +76,13 @@ may be the better choice. For those who have previously deployed their own registry based on the Registry 1.0 implementation and wish to deploy a Registry 2.0 while retaining images, data migration is required. A tool to assist with migration efforts has been -created. For more information see [docker/migrator] -(https://github.com/docker/migrator). +created. For more information see [docker/migrator](https://github.com/docker/migrator). ## Contribute Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute issues, fixes, and patches to this project. If you are contributing code, see -the instructions for [building a development environment](docs/building.md). +the instructions for [building a development environment](BUILDING.md). ## Support diff --git a/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md b/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md new file mode 100644 index 00000000000..73eba5a8716 --- /dev/null +++ b/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md @@ -0,0 +1,44 @@ +## Registry Release Checklist + +10. 
Compile release notes detailing features and since the last release. + + Update the `CHANGELOG.md` file and create a PR to master with the updates. +Once that PR has been approved by maintainers the change may be cherry-picked +to the release branch (new release branches may be forked from this commit). + +20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go` + +30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files. + +``` +make AUTHORS +``` + +40. Create a signed tag. + + Distribution uses semantic versioning. Tags are of the format +`vx.y.z[-rcn]`. You will need PGP installed and a PGP key which has been added +to your Github account. The comment for the tag should include the release +notes, use previous tags as a guide for formatting consistently. Run +`git tag -s vx.y.z[-rcn]` to create tag and `git -v vx.y.z[-rcn]` to verify tag, +check comment and correct commit hash. + +50. Push the signed tag + +60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox. + +70. Update the registry binary in [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request. + +80. Update the official image. Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag. Update the major version to point to the latest version and the minor version to point to new patch release if necessary. +e.g. to release `2.3.1` + + `2.3.1 (new)` + + `2.3.0 -> 2.3.0` can be removed + + `2 -> 2.3.1` + + `2.3 -> 2.3.1` + +90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images. + diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md index 9cdfa36c92a..701127afec6 100644 --- a/vendor/github.com/docker/distribution/ROADMAP.md +++ b/vendor/github.com/docker/distribution/ROADMAP.md @@ -156,7 +156,7 @@ full and understand the problems behind deletes. While, at first glance, implementing deleting seems simple, there are a number mitigating factors that make many solutions not ideal or even pathological in the context of a registry. The following paragraph discuss the background and -approaches that could be applied to a arrive at a solution. +approaches that could be applied to arrive at a solution. The goal of deletes in any system is to remove unused or unneeded data. Only data requested for deletion should be removed and no other data. Removing diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go index 1765e9f7408..01d309029ff 100644 --- a/vendor/github.com/docker/distribution/blobs.go +++ b/vendor/github.com/docker/distribution/blobs.go @@ -8,8 +8,8 @@ import ( "time" "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" + "github.com/opencontainers/go-digest" ) var ( @@ -69,6 +69,9 @@ type Descriptor struct { // against against this digest. Digest digest.Digest `json:"digest,omitempty"` + // URLs contains the source URLs of this content. 
+ URLs []string `json:"urls,omitempty"` + // NOTE: Before adding a field here, please ensure that all // other options have been exhausted. Much of the type relationships // depend on the simplicity of this type. @@ -124,6 +127,11 @@ type BlobDescriptorService interface { Clear(ctx context.Context, dgst digest.Digest) error } +// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService. +type BlobDescriptorServiceFactory interface { + BlobAccessController(svc BlobDescriptorService) BlobDescriptorService +} + // ReadSeekCloser is the primary reader type for blob data, combining // io.ReadSeeker with io.Closer. type ReadSeekCloser interface { @@ -144,7 +152,7 @@ type BlobProvider interface { // BlobServer can serve blobs via http. type BlobServer interface { - // ServeBlob attempts to serve the blob, identifed by dgst, via http. The + // ServeBlob attempts to serve the blob, identified by dgst, via http. The // service may decide to redirect the client elsewhere or serve the data // directly. // @@ -184,6 +192,18 @@ type BlobCreateOption interface { Apply(interface{}) error } +// CreateOptions is a collection of blob creation modifiers relevant to general +// blob storage intended to be configured by the BlobCreateOption.Apply method. +type CreateOptions struct { + Mount struct { + ShouldMount bool + From reference.Canonical + // Stat allows to pass precalculated descriptor to link and return. + // Blob access check will be skipped if set. + Stat *Descriptor + } +} + // BlobWriter provides a handle for inserting data into a blob store. // Instances should be obtained from BlobWriteService.Writer and // BlobWriteService.Resume. If supported by the store, a writer can be diff --git a/vendor/github.com/docker/distribution/circle.yml b/vendor/github.com/docker/distribution/circle.yml index 3d1ffd2f06a..ddc76c86c76 100644 --- a/vendor/github.com/docker/distribution/circle.yml +++ b/vendor/github.com/docker/distribution/circle.yml @@ -8,7 +8,7 @@ machine: post: # go - - gvm install go1.6 --prefer-binary --name=stable + - gvm install go1.8 --prefer-binary --name=stable environment: # Convenient shortcuts to "common" locations @@ -34,7 +34,7 @@ dependencies: override: # Install dependencies for every copied clone/go version - - gvm use stable && go get github.com/tools/godep: + - gvm use stable && go get github.com/lk4d4/vndr: pwd: $BASE_STABLE post: @@ -50,12 +50,14 @@ test: - gvm use stable && go version # Ensure validation of dependencies - - gvm use stable && if test -n "`git diff --stat=1000 master | grep -Ei \"vendor|godeps\"`"; then make dep-validate; fi: + - git fetch origin: + pwd: $BASE_STABLE + - gvm use stable && if test -n "`git diff --stat=1000 origin/master | grep -E \"^[[:space:]]*vendor\"`"; then make dep-validate; fi: pwd: $BASE_STABLE # First thing: build everything. This will catch compile errors, and it's # also necessary for go vet to work properly (see #807). - - gvm use stable && godep go install $(go list ./... | grep -v "/vendor/"): + - gvm use stable && go install $(go list ./... | grep -v "/vendor/"): pwd: $BASE_STABLE # FMT @@ -72,17 +74,20 @@ test: override: # Test stable, and report - - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... 
| grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': - timeout: 600 + - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': + timeout: 1000 pwd: $BASE_STABLE + # Test stable with race + - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | grep -v "registry/handlers" | grep -v "registry/storage/driver" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -race -tags "$DOCKER_BUILDTAGS" -test.short $PACKAGE': + timeout: 1000 + pwd: $BASE_STABLE post: # Report to codecov - bash <(curl -s https://codecov.io/bash): pwd: $BASE_STABLE ## Notes - # Disabled the -race detector due to massive memory usage. # Do we want these as well? # - go get code.google.com/p/go.tools/cmd/goimports # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" diff --git a/vendor/github.com/docker/distribution/digest/digester_resumable_test.go b/vendor/github.com/docker/distribution/digest/digester_resumable_test.go deleted file mode 100644 index 6ba21c801a8..00000000000 --- a/vendor/github.com/docker/distribution/digest/digester_resumable_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !noresumabledigest - -package digest - -import ( - "testing" - - "github.com/stevvooe/resumable" - _ "github.com/stevvooe/resumable/sha256" -) - -// TestResumableDetection just ensures that the resumable capability of a hash -// is exposed through the digester type, which is just a hash plus a Digest -// method. -func TestResumableDetection(t *testing.T) { - d := Canonical.New() - - if _, ok := d.Hash().(resumable.Hash); !ok { - t.Fatalf("expected digester to implement resumable.Hash: %#v, %v", d, d.Hash()) - } -} diff --git a/vendor/github.com/docker/distribution/digest/verifiers_test.go b/vendor/github.com/docker/distribution/digest/verifiers_test.go deleted file mode 100644 index c342d6e7c84..00000000000 --- a/vendor/github.com/docker/distribution/digest/verifiers_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package digest - -import ( - "bytes" - "crypto/rand" - "io" - "testing" -) - -func TestDigestVerifier(t *testing.T) { - p := make([]byte, 1<<20) - rand.Read(p) - digest := FromBytes(p) - - verifier, err := NewDigestVerifier(digest) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - - io.Copy(verifier, bytes.NewReader(p)) - - if !verifier.Verified() { - t.Fatalf("bytes not verified") - } -} - -// TestVerifierUnsupportedDigest ensures that unsupported digest validation is -// flowing through verifier creation. -func TestVerifierUnsupportedDigest(t *testing.T) { - unsupported := Digest("bean:0123456789abcdef") - - _, err := NewDigestVerifier(unsupported) - if err == nil { - t.Fatalf("expected error when creating verifier") - } - - if err != ErrDigestUnsupported { - t.Fatalf("incorrect error for unsupported digest: %v", err) - } -} - -// TODO(stevvooe): Add benchmarks to measure bytes/second throughput for -// DigestVerifier. -// -// The relevant benchmark for comparison can be run with the following -// commands: -// -// go test -bench . 
crypto/sha1 -// diff --git a/vendor/github.com/docker/distribution/digest/set.go b/vendor/github.com/docker/distribution/digestset/set.go similarity index 90% rename from vendor/github.com/docker/distribution/digest/set.go rename to vendor/github.com/docker/distribution/digestset/set.go index 4b9313c1aea..71327dca720 100644 --- a/vendor/github.com/docker/distribution/digest/set.go +++ b/vendor/github.com/docker/distribution/digestset/set.go @@ -1,10 +1,12 @@ -package digest +package digestset import ( "errors" "sort" "strings" "sync" + + digest "github.com/opencontainers/go-digest" ) var ( @@ -44,7 +46,7 @@ func NewSet() *Set { // values or short values. This function does not test equality, // rather whether the second value could match against the first // value. -func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool { +func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { if len(hex) == len(shortHex) { if hex != shortHex { return false @@ -64,7 +66,7 @@ func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool { // If no digests could be found ErrDigestNotFound will be returned // with an empty digest value. If multiple matches are found // ErrDigestAmbiguous will be returned with an empty digest value. -func (dst *Set) Lookup(d string) (Digest, error) { +func (dst *Set) Lookup(d string) (digest.Digest, error) { dst.mutex.RLock() defer dst.mutex.RUnlock() if len(dst.entries) == 0 { @@ -72,11 +74,11 @@ func (dst *Set) Lookup(d string) (Digest, error) { } var ( searchFunc func(int) bool - alg Algorithm + alg digest.Algorithm hex string ) - dgst, err := ParseDigest(d) - if err == ErrDigestInvalidFormat { + dgst, err := digest.Parse(d) + if err == digest.ErrDigestInvalidFormat { hex = d searchFunc = func(i int) bool { return dst.entries[i].val >= d @@ -108,7 +110,7 @@ func (dst *Set) Lookup(d string) (Digest, error) { // Add adds the given digest to the set. An error will be returned // if the given digest is invalid. If the digest already exists in the // set, this operation will be a no-op. -func (dst *Set) Add(d Digest) error { +func (dst *Set) Add(d digest.Digest) error { if err := d.Validate(); err != nil { return err } @@ -139,7 +141,7 @@ func (dst *Set) Add(d Digest) error { // Remove removes the given digest from the set. An err will be // returned if the given digest is invalid. If the digest does // not exist in the set, this operation will be a no-op. -func (dst *Set) Remove(d Digest) error { +func (dst *Set) Remove(d digest.Digest) error { if err := d.Validate(); err != nil { return err } @@ -167,10 +169,10 @@ func (dst *Set) Remove(d Digest) error { } // All returns all the digests in the set -func (dst *Set) All() []Digest { +func (dst *Set) All() []digest.Digest { dst.mutex.RLock() defer dst.mutex.RUnlock() - retValues := make([]Digest, len(dst.entries)) + retValues := make([]digest.Digest, len(dst.entries)) for i := range dst.entries { retValues[i] = dst.entries[i].digest } @@ -183,10 +185,10 @@ func (dst *Set) All() []Digest { // entire value of digest if uniqueness cannot be achieved without the // full value. This function will attempt to make short codes as short // as possible to be unique. 
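// Editor's note: an illustrative sketch only, not part of this patch. After the
// rename to package digestset, typical use of Set with the external go-digest
// type looks roughly like this (the content string is hypothetical):
//
//	dset := digestset.NewSet()
//	dgst := digest.FromString("some-content") // any valid digest
//	_ = dset.Add(dgst)
//	found, err := dset.Lookup(dgst.Hex()[:8]) // short-prefix lookup
//	if err == digestset.ErrDigestAmbiguous {
//		// the prefix matches more than one digest in the set
//	}
//	_ = found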
-func ShortCodeTable(dst *Set, length int) map[Digest]string { +func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { dst.mutex.RLock() defer dst.mutex.RUnlock() - m := make(map[Digest]string, len(dst.entries)) + m := make(map[digest.Digest]string, len(dst.entries)) l := length resetIdx := 0 for i := 0; i < len(dst.entries); i++ { @@ -222,9 +224,9 @@ func ShortCodeTable(dst *Set, length int) map[Digest]string { } type digestEntry struct { - alg Algorithm + alg digest.Algorithm val string - digest Digest + digest digest.Digest } type digestEntries []*digestEntry diff --git a/vendor/github.com/docker/distribution/digest/set_test.go b/vendor/github.com/docker/distribution/digestset/set_test.go similarity index 93% rename from vendor/github.com/docker/distribution/digest/set_test.go rename to vendor/github.com/docker/distribution/digestset/set_test.go index e9dab8795a7..89c5729d005 100644 --- a/vendor/github.com/docker/distribution/digest/set_test.go +++ b/vendor/github.com/docker/distribution/digestset/set_test.go @@ -1,20 +1,23 @@ -package digest +package digestset import ( "crypto/sha256" + _ "crypto/sha512" "encoding/binary" "math/rand" "testing" + + digest "github.com/opencontainers/go-digest" ) -func assertEqualDigests(t *testing.T, d1, d2 Digest) { +func assertEqualDigests(t *testing.T, d1, d2 digest.Digest) { if d1 != d2 { t.Fatalf("Digests do not match:\n\tActual: %s\n\tExpected: %s", d1, d2) } } func TestLookup(t *testing.T) { - digests := []Digest{ + digests := []digest.Digest{ "sha256:1234511111111111111111111111111111111111111111111111111111111111", "sha256:1234111111111111111111111111111111111111111111111111111111111111", "sha256:1234611111111111111111111111111111111111111111111111111111111111", @@ -88,7 +91,7 @@ func TestLookup(t *testing.T) { } func TestAddDuplication(t *testing.T) { - digests := []Digest{ + digests := []digest.Digest{ "sha256:1234111111111111111111111111111111111111111111111111111111111111", "sha256:1234511111111111111111111111111111111111111111111111111111111111", "sha256:1234611111111111111111111111111111111111111111111111111111111111", @@ -110,7 +113,7 @@ func TestAddDuplication(t *testing.T) { t.Fatal("Invalid dset size") } - if err := dset.Add(Digest("sha256:1234511111111111111111111111111111111111111111111111111111111111")); err != nil { + if err := dset.Add(digest.Digest("sha256:1234511111111111111111111111111111111111111111111111111111111111")); err != nil { t.Fatal(err) } @@ -118,7 +121,7 @@ func TestAddDuplication(t *testing.T) { t.Fatal("Duplicate digest insert allowed") } - if err := dset.Add(Digest("sha384:123451111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")); err != nil { + if err := dset.Add(digest.Digest("sha384:123451111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")); err != nil { t.Fatal(err) } @@ -170,7 +173,7 @@ func TestAll(t *testing.T) { } } - all := map[Digest]struct{}{} + all := map[digest.Digest]struct{}{} for _, dgst := range dset.All() { all[dgst] = struct{}{} } @@ -194,7 +197,7 @@ func assertEqualShort(t *testing.T, actual, expected string) { } func TestShortCodeTable(t *testing.T) { - digests := []Digest{ + digests := []digest.Digest{ "sha256:1234111111111111111111111111111111111111111111111111111111111111", "sha256:1234511111111111111111111111111111111111111111111111111111111111", "sha256:1234611111111111111111111111111111111111111111111111111111111111", @@ -227,15 +230,15 @@ func TestShortCodeTable(t *testing.T) { 
assertEqualShort(t, dump[digests[7]], "653") } -func createDigests(count int) ([]Digest, error) { +func createDigests(count int) ([]digest.Digest, error) { r := rand.New(rand.NewSource(25823)) - digests := make([]Digest, count) + digests := make([]digest.Digest, count) for i := range digests { h := sha256.New() if err := binary.Write(h, binary.BigEndian, r.Int63()); err != nil { return nil, err } - digests[i] = NewDigest("sha256", h) + digests[i] = digest.NewDigest("sha256", h) } return digests, nil } diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go index c20f28113ce..020d33258b9 100644 --- a/vendor/github.com/docker/distribution/errors.go +++ b/vendor/github.com/docker/distribution/errors.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - "github.com/docker/distribution/digest" + "github.com/opencontainers/go-digest" ) // ErrAccessDenied is returned when an access to a requested resource is @@ -77,7 +77,7 @@ func (err ErrManifestUnknownRevision) Error() string { type ErrManifestUnverified struct{} func (ErrManifestUnverified) Error() string { - return fmt.Sprintf("unverified manifest") + return "unverified manifest" } // ErrManifestVerification provides a type to collect errors encountered diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go index 3bf912a6597..2c99f25d391 100644 --- a/vendor/github.com/docker/distribution/manifests.go +++ b/vendor/github.com/docker/distribution/manifests.go @@ -5,20 +5,25 @@ import ( "mime" "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" + "github.com/opencontainers/go-digest" ) // Manifest represents a registry object specifying a set of // references and an optional target type Manifest interface { // References returns a list of objects which make up this manifest. - // The references are strictly ordered from base to head. A reference - // is anything which can be represented by a distribution.Descriptor + // A reference is anything which can be represented by a + // distribution.Descriptor. These can consist of layers, resources or other + // manifests. + // + // While no particular order is required, implementations should return + // them from highest to lowest priority. For example, one might want to + // return the base layer before the top layer. References() []Descriptor // Payload provides the serialized format of the manifest, in addition to - // the mediatype. - Payload() (mediatype string, payload []byte, err error) + // the media type. + Payload() (mediaType string, payload []byte, err error) } // ManifestBuilder creates a manifest allowing one to include dependencies. @@ -36,6 +41,9 @@ type ManifestBuilder interface { // AppendReference includes the given object in the manifest after any // existing dependencies. If the add fails, such as when adding an // unsupported dependency, an error may be returned. + // + // The destination of the reference is dependent on the manifest type and + // the dependency type. AppendReference(dependency Describable) error } @@ -61,12 +69,6 @@ type ManifestEnumerator interface { Enumerate(ctx context.Context, ingester func(digest.Digest) error) error } -// SignaturesGetter provides an interface for getting the signatures of a schema1 manifest. If the digest -// referred to is not a schema1 manifest, an error should be returned. 
-type SignaturesGetter interface { - GetSignatures(ctx context.Context, manifestDigest digest.Digest) ([]digest.Digest, error) -} - // Describable is an interface for descriptors type Describable interface { Descriptor() Descriptor @@ -92,20 +94,20 @@ var mappings = make(map[string]UnmarshalFunc, 0) func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { // Need to look up by the actual media type, not the raw contents of // the header. Strip semicolons and anything following them. - var mediatype string + var mediaType string if ctHeader != "" { var err error - mediatype, _, err = mime.ParseMediaType(ctHeader) + mediaType, _, err = mime.ParseMediaType(ctHeader) if err != nil { return nil, Descriptor{}, err } } - unmarshalFunc, ok := mappings[mediatype] + unmarshalFunc, ok := mappings[mediaType] if !ok { unmarshalFunc, ok = mappings[""] if !ok { - return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype) + return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType) } } @@ -114,10 +116,10 @@ func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) // RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This // should be called from specific -func RegisterManifestSchema(mediatype string, u UnmarshalFunc) error { - if _, ok := mappings[mediatype]; ok { - return fmt.Errorf("manifest mediatype registration would overwrite existing: %s", mediatype) +func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error { + if _, ok := mappings[mediaType]; ok { + return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType) } - mappings[mediatype] = u + mappings[mediaType] = u return nil } diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/docker/distribution/reference/helpers.go new file mode 100644 index 00000000000..978df7eabbf --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/helpers.go @@ -0,0 +1,42 @@ +package reference + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. 
+func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go new file mode 100644 index 00000000000..2d71fc5e9ff --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/normalize.go @@ -0,0 +1,170 @@ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/distribution/digestset" + "github.com/opencontainers/go-digest" +) + +var ( + legacyDefaultDomain = "index.docker.io" + defaultDomain = "docker.io" + officialRepoName = "library" + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. An example normalized +// name is "docker.io/library/ubuntu" and corresponding +// familiar name of "ubuntu". +type normalizedNamed interface { + Named + Familiar() Named +} + +// ParseNormalizedNamed parses a string into a named reference +// transforming a familiar name from Docker UI to a fully +// qualified reference. If the value may be an identifier +// use ParseAnyReference. +func ParseNormalizedNamed(s string) (Named, error) { + if ok := anchoredIdentifierRegexp.MatchString(s); ok { + return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) + } + domain, remainder := splitDockerDomain(s) + var remoteName string + if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { + remoteName = remainder[:tagSep] + } else { + remoteName = remainder + } + if strings.ToLower(remoteName) != remoteName { + return nil, errors.New("invalid reference format: repository name must be lowercase") + } + + ref, err := Parse(domain + "/" + remainder) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// splitDockerDomain splits a repository name to domain and remotename string. +// If no valid domain is found, the default domain is used. Repository name +// needs to be already validated before. +func splitDockerDomain(name string) (domain, remainder string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { + domain, remainder = defaultDomain, name + } else { + domain, remainder = name[:i], name[i+1:] + } + if domain == legacyDefaultDomain { + domain = defaultDomain + } + if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { + remainder = officialRepoName + "/" + remainder + } + return +} + +// familiarizeName returns a shortened version of the name familiar +// to to the Docker UI. Familiar names have the default domain +// "docker.io" and "library/" repository prefix removed. +// For example, "docker.io/library/redis" will have the familiar +// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". +// Returns a familiarized named only reference. 
+func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { + repo.path = split[1] + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. +func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} + +// ParseAnyReferenceWithSet parses a reference string as a possible short +// identifier to be matched in a digest set, a full digest, or familiar name. 
+func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { + if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { + dgst, err := ds.Lookup(ref) + if err == nil { + return digestReference(dgst), nil + } + } else { + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + } + + return ParseNormalizedNamed(ref) +} diff --git a/vendor/github.com/docker/distribution/reference/normalize_test.go b/vendor/github.com/docker/distribution/reference/normalize_test.go new file mode 100644 index 00000000000..a881972accb --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/normalize_test.go @@ -0,0 +1,625 @@ +package reference + +import ( + "strconv" + "testing" + + "github.com/docker/distribution/digestset" + "github.com/opencontainers/go-digest" +) + +func TestValidateReferenceName(t *testing.T) { + validRepoNames := []string{ + "docker/docker", + "library/debian", + "debian", + "docker.io/docker/docker", + "docker.io/library/debian", + "docker.io/debian", + "index.docker.io/docker/docker", + "index.docker.io/library/debian", + "index.docker.io/debian", + "127.0.0.1:5000/docker/docker", + "127.0.0.1:5000/library/debian", + "127.0.0.1:5000/debian", + "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", + + // This test case was moved from invalid to valid since it is valid input + // when specified with a hostname, it removes the ambiguity from about + // whether the value is an identifier or repository name + "docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", + } + invalidRepoNames := []string{ + "https://github.com/docker/docker", + "docker/Docker", + "-docker", + "-docker/docker", + "-docker.io/docker/docker", + "docker///docker", + "docker.io/docker/Docker", + "docker.io/docker///docker", + "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", + } + + for _, name := range invalidRepoNames { + _, err := ParseNormalizedNamed(name) + if err == nil { + t.Fatalf("Expected invalid repo name for %q", name) + } + } + + for _, name := range validRepoNames { + _, err := ParseNormalizedNamed(name) + if err != nil { + t.Fatalf("Error parsing repo name %s, got: %q", name, err) + } + } +} + +func TestValidateRemoteName(t *testing.T) { + validRepositoryNames := []string{ + // Sanity check. + "docker/docker", + + // Allow 64-character non-hexadecimal names (hexadecimal names are forbidden). + "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", + + // Allow embedded hyphens. + "docker-rules/docker", + + // Allow multiple hyphens as well. + "docker---rules/docker", + + //Username doc and image name docker being tested. + "doc/docker", + + // single character names are now allowed. + "d/docker", + "jess/t", + + // Consecutive underscores. + "dock__er/docker", + } + for _, repositoryName := range validRepositoryNames { + _, err := ParseNormalizedNamed(repositoryName) + if err != nil { + t.Errorf("Repository name should be valid: %v. Error: %v", repositoryName, err) + } + } + + invalidRepositoryNames := []string{ + // Disallow capital letters. + "docker/Docker", + + // Only allow one slash. + "docker///docker", + + // Disallow 64-character hexadecimal. + "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", + + // Disallow leading and trailing hyphens in namespace. + "-docker/docker", + "docker-/docker", + "-docker-/docker", + + // Don't allow underscores everywhere (as opposed to hyphens). 
+ "____/____", + + "_docker/_docker", + + // Disallow consecutive periods. + "dock..er/docker", + "dock_.er/docker", + "dock-.er/docker", + + // No repository. + "docker/", + + //namespace too long + "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", + } + for _, repositoryName := range invalidRepositoryNames { + if _, err := ParseNormalizedNamed(repositoryName); err == nil { + t.Errorf("Repository name should be invalid: %v", repositoryName) + } + } +} + +func TestParseRepositoryInfo(t *testing.T) { + type tcase struct { + RemoteName, FamiliarName, FullName, AmbiguousName, Domain string + } + + tcases := []tcase{ + { + RemoteName: "fooo/bar", + FamiliarName: "fooo/bar", + FullName: "docker.io/fooo/bar", + AmbiguousName: "index.docker.io/fooo/bar", + Domain: "docker.io", + }, + { + RemoteName: "library/ubuntu", + FamiliarName: "ubuntu", + FullName: "docker.io/library/ubuntu", + AmbiguousName: "library/ubuntu", + Domain: "docker.io", + }, + { + RemoteName: "nonlibrary/ubuntu", + FamiliarName: "nonlibrary/ubuntu", + FullName: "docker.io/nonlibrary/ubuntu", + AmbiguousName: "", + Domain: "docker.io", + }, + { + RemoteName: "other/library", + FamiliarName: "other/library", + FullName: "docker.io/other/library", + AmbiguousName: "", + Domain: "docker.io", + }, + { + RemoteName: "private/moonbase", + FamiliarName: "127.0.0.1:8000/private/moonbase", + FullName: "127.0.0.1:8000/private/moonbase", + AmbiguousName: "", + Domain: "127.0.0.1:8000", + }, + { + RemoteName: "privatebase", + FamiliarName: "127.0.0.1:8000/privatebase", + FullName: "127.0.0.1:8000/privatebase", + AmbiguousName: "", + Domain: "127.0.0.1:8000", + }, + { + RemoteName: "private/moonbase", + FamiliarName: "example.com/private/moonbase", + FullName: "example.com/private/moonbase", + AmbiguousName: "", + Domain: "example.com", + }, + { + RemoteName: "privatebase", + FamiliarName: "example.com/privatebase", + FullName: "example.com/privatebase", + AmbiguousName: "", + Domain: "example.com", + }, + { + RemoteName: "private/moonbase", + FamiliarName: "example.com:8000/private/moonbase", + FullName: "example.com:8000/private/moonbase", + AmbiguousName: "", + Domain: "example.com:8000", + }, + { + RemoteName: "privatebasee", + FamiliarName: "example.com:8000/privatebasee", + FullName: "example.com:8000/privatebasee", + AmbiguousName: "", + Domain: "example.com:8000", + }, + { + RemoteName: "library/ubuntu-12.04-base", + FamiliarName: "ubuntu-12.04-base", + FullName: "docker.io/library/ubuntu-12.04-base", + AmbiguousName: "index.docker.io/library/ubuntu-12.04-base", + Domain: "docker.io", + }, + { + RemoteName: "library/foo", + FamiliarName: "foo", + FullName: "docker.io/library/foo", + AmbiguousName: "docker.io/foo", + Domain: "docker.io", + }, + { + RemoteName: "library/foo/bar", + FamiliarName: "library/foo/bar", + FullName: "docker.io/library/foo/bar", + AmbiguousName: "", + Domain: "docker.io", + }, + { + RemoteName: "store/foo/bar", + FamiliarName: "store/foo/bar", + FullName: "docker.io/store/foo/bar", + AmbiguousName: "", + Domain: "docker.io", + }, + } + + for _, tcase := range tcases { + refStrings := []string{tcase.FamiliarName, tcase.FullName} + if tcase.AmbiguousName != "" { + refStrings = append(refStrings, tcase.AmbiguousName) + } + + var refs []Named + for _, r := range refStrings { + named, 
err := ParseNormalizedNamed(r) + if err != nil { + t.Fatal(err) + } + refs = append(refs, named) + } + + for _, r := range refs { + if expected, actual := tcase.FamiliarName, FamiliarName(r); expected != actual { + t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual) + } + if expected, actual := tcase.FullName, r.String(); expected != actual { + t.Fatalf("Invalid canonical reference for %q. Expected %q, got %q", r, expected, actual) + } + if expected, actual := tcase.Domain, Domain(r); expected != actual { + t.Fatalf("Invalid domain for %q. Expected %q, got %q", r, expected, actual) + } + if expected, actual := tcase.RemoteName, Path(r); expected != actual { + t.Fatalf("Invalid remoteName for %q. Expected %q, got %q", r, expected, actual) + } + + } + } +} + +func TestParseReferenceWithTagAndDigest(t *testing.T) { + shortRef := "busybox:latest@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa" + ref, err := ParseNormalizedNamed(shortRef) + if err != nil { + t.Fatal(err) + } + if expected, actual := "docker.io/library/"+shortRef, ref.String(); actual != expected { + t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) + } + + if _, isTagged := ref.(NamedTagged); !isTagged { + t.Fatalf("Reference from %q should support tag", ref) + } + if _, isCanonical := ref.(Canonical); !isCanonical { + t.Fatalf("Reference from %q should support digest", ref) + } + if expected, actual := shortRef, FamiliarString(ref); actual != expected { + t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) + } +} + +func TestInvalidReferenceComponents(t *testing.T) { + if _, err := ParseNormalizedNamed("-foo"); err == nil { + t.Fatal("Expected WithName to detect invalid name") + } + ref, err := ParseNormalizedNamed("busybox") + if err != nil { + t.Fatal(err) + } + if _, err := WithTag(ref, "-foo"); err == nil { + t.Fatal("Expected WithName to detect invalid tag") + } + if _, err := WithDigest(ref, digest.Digest("foo")); err == nil { + t.Fatal("Expected WithDigest to detect invalid digest") + } +} + +func equalReference(r1, r2 Reference) bool { + switch v1 := r1.(type) { + case digestReference: + if v2, ok := r2.(digestReference); ok { + return v1 == v2 + } + case repository: + if v2, ok := r2.(repository); ok { + return v1 == v2 + } + case taggedReference: + if v2, ok := r2.(taggedReference); ok { + return v1 == v2 + } + case canonicalReference: + if v2, ok := r2.(canonicalReference); ok { + return v1 == v2 + } + case reference: + if v2, ok := r2.(reference); ok { + return v1 == v2 + } + } + return false +} + +func TestParseAnyReference(t *testing.T) { + tcases := []struct { + Reference string + Equivalent string + Expected Reference + Digests []digest.Digest + }{ + { + Reference: "redis", + Equivalent: "docker.io/library/redis", + }, + { + Reference: "redis:latest", + Equivalent: "docker.io/library/redis:latest", + }, + { + Reference: "docker.io/library/redis:latest", + Equivalent: "docker.io/library/redis:latest", + }, + { + Reference: "redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + Equivalent: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + }, + { + Reference: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + Equivalent: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + }, + { + Reference: 
"dmcgowan/myapp", + Equivalent: "docker.io/dmcgowan/myapp", + }, + { + Reference: "dmcgowan/myapp:latest", + Equivalent: "docker.io/dmcgowan/myapp:latest", + }, + { + Reference: "docker.io/mcgowan/myapp:latest", + Equivalent: "docker.io/mcgowan/myapp:latest", + }, + { + Reference: "dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + Equivalent: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + }, + { + Reference: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + Equivalent: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + }, + { + Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + }, + { + Reference: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + }, + { + Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", + Equivalent: "docker.io/library/dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", + }, + { + Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", + Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + Digests: []digest.Digest{ + digest.Digest("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + }, + }, + { + Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", + Equivalent: "docker.io/library/dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", + Digests: []digest.Digest{ + digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + }, + }, + { + Reference: "dbcc1c", + Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", + Digests: []digest.Digest{ + digest.Digest("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + }, + }, + { + Reference: "dbcc1", + Equivalent: "docker.io/library/dbcc1", + Digests: []digest.Digest{ + digest.Digest("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + }, + }, + { + Reference: "dbcc1c", + Equivalent: "docker.io/library/dbcc1c", + Digests: []digest.Digest{ + digest.Digest("sha256:abcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), + }, + }, + } + + for _, tcase := range tcases { + var ref Reference + var err error + if len(tcase.Digests) == 0 { + ref, err = ParseAnyReference(tcase.Reference) + } else { + ds := digestset.NewSet() + for _, dgst := range tcase.Digests { + if err := ds.Add(dgst); err != nil { + t.Fatalf("Error adding digest %s: %v", 
dgst.String(), err) + } + } + ref, err = ParseAnyReferenceWithSet(tcase.Reference, ds) + } + if err != nil { + t.Fatalf("Error parsing reference %s: %v", tcase.Reference, err) + } + if ref.String() != tcase.Equivalent { + t.Fatalf("Unexpected string: %s, expected %s", ref.String(), tcase.Equivalent) + } + + expected := tcase.Expected + if expected == nil { + expected, err = Parse(tcase.Equivalent) + if err != nil { + t.Fatalf("Error parsing reference %s: %v", tcase.Equivalent, err) + } + } + if !equalReference(ref, expected) { + t.Errorf("Unexpected reference %#v, expected %#v", ref, expected) + } + } +} + +func TestNormalizedSplitHostname(t *testing.T) { + testcases := []struct { + input string + domain string + name string + }{ + { + input: "test.com/foo", + domain: "test.com", + name: "foo", + }, + { + input: "test_com/foo", + domain: "docker.io", + name: "test_com/foo", + }, + { + input: "docker/migrator", + domain: "docker.io", + name: "docker/migrator", + }, + { + input: "test.com:8080/foo", + domain: "test.com:8080", + name: "foo", + }, + { + input: "test-com:8080/foo", + domain: "test-com:8080", + name: "foo", + }, + { + input: "foo", + domain: "docker.io", + name: "library/foo", + }, + { + input: "xn--n3h.com/foo", + domain: "xn--n3h.com", + name: "foo", + }, + { + input: "xn--n3h.com:18080/foo", + domain: "xn--n3h.com:18080", + name: "foo", + }, + { + input: "docker.io/foo", + domain: "docker.io", + name: "library/foo", + }, + { + input: "docker.io/library/foo", + domain: "docker.io", + name: "library/foo", + }, + { + input: "docker.io/library/foo/bar", + domain: "docker.io", + name: "library/foo/bar", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
+ t.Fail() + } + + named, err := ParseNormalizedNamed(testcase.input) + if err != nil { + failf("error parsing name: %s", err) + } + domain, name := SplitHostname(named) + if domain != testcase.domain { + failf("unexpected domain: got %q, expected %q", domain, testcase.domain) + } + if name != testcase.name { + failf("unexpected name: got %q, expected %q", name, testcase.name) + } + } +} + +func TestMatchError(t *testing.T) { + named, err := ParseAnyReference("foo") + if err != nil { + t.Fatal(err) + } + _, err = FamiliarMatch("[-x]", named) + if err == nil { + t.Fatalf("expected an error, got nothing") + } +} + +func TestMatch(t *testing.T) { + matchCases := []struct { + reference string + pattern string + expected bool + }{ + { + reference: "foo", + pattern: "foo/**/ba[rz]", + expected: false, + }, + { + reference: "foo/any/bat", + pattern: "foo/**/ba[rz]", + expected: false, + }, + { + reference: "foo/a/bar", + pattern: "foo/**/ba[rz]", + expected: true, + }, + { + reference: "foo/b/baz", + pattern: "foo/**/ba[rz]", + expected: true, + }, + { + reference: "foo/c/baz:tag", + pattern: "foo/**/ba[rz]", + expected: true, + }, + { + reference: "foo/c/baz:tag", + pattern: "foo/*/baz:tag", + expected: true, + }, + { + reference: "foo/c/baz:tag", + pattern: "foo/c/baz:tag", + expected: true, + }, + { + reference: "example.com/foo/c/baz:tag", + pattern: "*/foo/c/baz", + expected: true, + }, + { + reference: "example.com/foo/c/baz:tag", + pattern: "example.com/foo/c/baz", + expected: true, + }, + } + for _, c := range matchCases { + named, err := ParseAnyReference(c.reference) + if err != nil { + t.Fatal(err) + } + actual, err := FamiliarMatch(c.pattern, named) + if err != nil { + t.Fatal(err) + } + if actual != c.expected { + t.Fatalf("expected %s match %s to be %v, was %v", c.reference, c.pattern, c.expected, actual) + } + } +} diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go index bb09fa25dae..2f66cca87a3 100644 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -4,28 +4,32 @@ // Grammar // // reference := name [ ":" tag ] [ "@" digest ] -// name := [hostname '/'] component ['/' component]* -// hostname := hostcomponent ['.' hostcomponent]* [':' port-number] -// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// name := [domain '/'] path-component ['/' path-component]* +// domain := domain-component ['.' 
domain-component]* [':' port-number] +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ -// component := alpha-numeric [separator alpha-numeric]* +// path-component := alpha-numeric [separator alpha-numeric]* // alpha-numeric := /[a-z0-9]+/ // separator := /[_.]|__|[-]*/ // // tag := /[\w][\w.-]{0,127}/ // // digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ] +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* // digest-algorithm-separator := /[+.-_]/ // digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ // digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +// short-identifier := /[a-f0-9]{6,64}/ package reference import ( "errors" "fmt" + "strings" - "github.com/docker/distribution/digest" + "github.com/opencontainers/go-digest" ) const ( @@ -43,11 +47,17 @@ var ( // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. ErrDigestInvalidFormat = errors.New("invalid digest format") + // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. + ErrNameContainsUppercase = errors.New("repository name must be lowercase") + // ErrNameEmpty is returned for empty, invalid repository names. ErrNameEmpty = errors.New("repository name must have at least one component") // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) + + // ErrNameNotCanonical is returned when a name is not canonical. + ErrNameNotCanonical = errors.New("repository name must be canonical") ) // Reference is an opaque object reference identifier that may include @@ -121,23 +131,56 @@ type Digested interface { } // Canonical reference is an object with a fully unique -// name including a name with hostname and digest +// name including a name with domain and digest type Canonical interface { Named Digest() digest.Digest } +// namedRepository is a reference to a repository with a name. +// A namedRepository has both domain and path components. +type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + // SplitHostname splits a named reference into a // hostname and name string. 
If no valid hostname is // found, the hostname is empty and the full value // is returned as name +// DEPRECATED: Use Domain or Path func SplitHostname(named Named) (string, string) { - name := named.Name() - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { - return "", name + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() } - return match[1], match[2] + return splitDomain(named.Name()) } // Parse parses s and returns a syntactically valid Reference. @@ -149,7 +192,9 @@ func Parse(s string) (Reference, error) { if s == "" { return nil, ErrNameEmpty } - // TODO(dmcgowan): Provide more specific and helpful error + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } return nil, ErrReferenceInvalidFormat } @@ -157,13 +202,24 @@ func Parse(s string) (Reference, error) { return nil, ErrNameTooLong } + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if nameMatch != nil && len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + ref := reference{ - name: matches[1], - tag: matches[2], + namedRepository: repo, + tag: matches[2], } if matches[3] != "" { var err error - ref.digest, err = digest.ParseDigest(matches[3]) + ref.digest, err = digest.Parse(matches[3]) if err != nil { return nil, err } @@ -178,18 +234,17 @@ func Parse(s string) (Reference, error) { } // ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name, otherwise an error is -// returned. +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. // If an error was encountered it is returned, along with a nil Reference. // NOTE: ParseNamed will not handle short digests. 
func ParseNamed(s string) (Named, error) { - ref, err := Parse(s) + named, err := ParseNormalizedNamed(s) if err != nil { return nil, err } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) + if named.String() != s { + return nil, ErrNameNotCanonical } return named, nil } @@ -200,10 +255,15 @@ func WithName(name string) (Named, error) { if len(name) > NameTotalLengthMax { return nil, ErrNameTooLong } - if !anchoredNameRegexp.MatchString(name) { + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { return nil, ErrReferenceInvalidFormat } - return repository(name), nil + return repository{ + domain: match[1], + path: match[2], + }, nil } // WithTag combines the name from "name" and the tag from "tag" to form a @@ -212,9 +272,23 @@ func WithTag(name Named, tag string) (NamedTagged, error) { if !anchoredTagRegexp.MatchString(tag) { return nil, ErrTagInvalidFormat } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } return taggedReference{ - name: name.Name(), - tag: tag, + namedRepository: repo, + tag: tag, }, nil } @@ -224,14 +298,37 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) { if !anchoredDigestRegexp.MatchString(digest.String()) { return nil, ErrDigestInvalidFormat } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } return canonicalReference{ - name: name.Name(), - digest: digest, + namedRepository: repo, + digest: digest, }, nil } +// TrimNamed removes any tag or digest from the named reference. 
+func TrimNamed(ref Named) Named { + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } +} + func getBestReferenceType(ref reference) Reference { - if ref.name == "" { + if ref.Name() == "" { // Allow digest only references if ref.digest != "" { return digestReference(ref.digest) @@ -241,16 +338,16 @@ func getBestReferenceType(ref reference) Reference { if ref.tag == "" { if ref.digest != "" { return canonicalReference{ - name: ref.name, - digest: ref.digest, + namedRepository: ref.namedRepository, + digest: ref.digest, } } - return repository(ref.name) + return ref.namedRepository } if ref.digest == "" { return taggedReference{ - name: ref.name, - tag: ref.tag, + namedRepository: ref.namedRepository, + tag: ref.tag, } } @@ -258,17 +355,13 @@ func getBestReferenceType(ref reference) Reference { } type reference struct { - name string + namedRepository tag string digest digest.Digest } func (r reference) String() string { - return r.name + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Name() string { - return r.name + return r.Name() + ":" + r.tag + "@" + r.digest.String() } func (r reference) Tag() string { @@ -279,20 +372,34 @@ func (r reference) Digest() digest.Digest { return r.digest } -type repository string +type repository struct { + domain string + path string +} func (r repository) String() string { - return string(r) + return r.Name() } func (r repository) Name() string { - return string(r) + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path } type digestReference digest.Digest func (d digestReference) String() string { - return d.String() + return digest.Digest(d).String() } func (d digestReference) Digest() digest.Digest { @@ -300,16 +407,12 @@ func (d digestReference) Digest() digest.Digest { } type taggedReference struct { - name string - tag string + namedRepository + tag string } func (t taggedReference) String() string { - return t.name + ":" + t.tag -} - -func (t taggedReference) Name() string { - return t.name + return t.Name() + ":" + t.tag } func (t taggedReference) Tag() string { @@ -317,16 +420,12 @@ func (t taggedReference) Tag() string { } type canonicalReference struct { - name string + namedRepository digest digest.Digest } func (c canonicalReference) String() string { - return c.name + "@" + c.digest.String() -} - -func (c canonicalReference) Name() string { - return c.name + return c.Name() + "@" + c.digest.String() } func (c canonicalReference) Digest() digest.Digest { diff --git a/vendor/github.com/docker/distribution/reference/reference_test.go b/vendor/github.com/docker/distribution/reference/reference_test.go index cde1a7a2e21..16b871f987b 100644 --- a/vendor/github.com/docker/distribution/reference/reference_test.go +++ b/vendor/github.com/docker/distribution/reference/reference_test.go @@ -1,12 +1,14 @@ package reference import ( + _ "crypto/sha256" + _ "crypto/sha512" "encoding/json" "strconv" "strings" "testing" - "github.com/docker/distribution/digest" + "github.com/opencontainers/go-digest" ) func TestReferenceParse(t *testing.T) { @@ -19,8 +21,8 @@ func TestReferenceParse(t *testing.T) { err error // repository is the string representation for the reference repository string - // hostname is the hostname expected in the reference - hostname string + // domain is the domain expected in the reference + domain string // tag is the tag for the reference 
tag string // digest is the digest for the reference (enforces digest reference) @@ -42,37 +44,37 @@ func TestReferenceParse(t *testing.T) { }, { input: "test.com/repo:tag", - hostname: "test.com", + domain: "test.com", repository: "test.com/repo", tag: "tag", }, { input: "test:5000/repo", - hostname: "test:5000", + domain: "test:5000", repository: "test:5000/repo", }, { input: "test:5000/repo:tag", - hostname: "test:5000", + domain: "test:5000", repository: "test:5000/repo", tag: "tag", }, { input: "test:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - hostname: "test:5000", + domain: "test:5000", repository: "test:5000/repo", digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", }, { input: "test:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - hostname: "test:5000", + domain: "test:5000", repository: "test:5000/repo", tag: "tag", digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", }, { input: "test:5000/repo", - hostname: "test:5000", + domain: "test:5000", repository: "test:5000/repo", }, { @@ -95,13 +97,32 @@ func TestReferenceParse(t *testing.T) { input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", err: digest.ErrDigestUnsupported, }, + { + input: "Uppercase:tag", + err: ErrNameContainsUppercase, + }, + // FIXME "Uppercase" is incorrectly handled as a domain-name here, therefore passes. + // See https://github.com/docker/distribution/pull/1778, and https://github.com/docker/docker/pull/20175 + //{ + // input: "Uppercase/lowercase:tag", + // err: ErrNameContainsUppercase, + //}, + { + input: "test:5000/Uppercase/lowercase:tag", + err: ErrNameContainsUppercase, + }, + { + input: "lowercase:Uppercase", + repository: "lowercase", + tag: "Uppercase", + }, { input: strings.Repeat("a/", 128) + "a:tag", err: ErrNameTooLong, }, { input: strings.Repeat("a/", 127) + "a:tag-puts-this-over-max", - hostname: "a", + domain: "a", repository: strings.Repeat("a/", 127) + "a", tag: "tag-puts-this-over-max", }, @@ -111,30 +132,30 @@ func TestReferenceParse(t *testing.T) { }, { input: "sub-dom1.foo.com/bar/baz/quux", - hostname: "sub-dom1.foo.com", + domain: "sub-dom1.foo.com", repository: "sub-dom1.foo.com/bar/baz/quux", }, { input: "sub-dom1.foo.com/bar/baz/quux:some-long-tag", - hostname: "sub-dom1.foo.com", + domain: "sub-dom1.foo.com", repository: "sub-dom1.foo.com/bar/baz/quux", tag: "some-long-tag", }, { input: "b.gcr.io/test.example.com/my-app:test.example.com", - hostname: "b.gcr.io", + domain: "b.gcr.io", repository: "b.gcr.io/test.example.com/my-app", tag: "test.example.com", }, { input: "xn--n3h.com/myimage:xn--n3h.com", // ☃.com in punycode - hostname: "xn--n3h.com", + domain: "xn--n3h.com", repository: "xn--n3h.com/myimage", tag: "xn--n3h.com", }, { input: "xn--7o8h.com/myimage:xn--7o8h.com@sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // 🐳.com in punycode - hostname: "xn--7o8h.com", + domain: "xn--7o8h.com", repository: "xn--7o8h.com/myimage", tag: "xn--7o8h.com", digest: "sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", @@ -146,7 +167,7 @@ func TestReferenceParse(t *testing.T) { }, { input: "foo/foo_bar.com:8080", - hostname: "foo", + domain: "foo", repository: "foo/foo_bar.com", tag: "8080", }, @@ -177,11 +198,11 @@ func 
TestReferenceParse(t *testing.T) { if named.Name() != testcase.repository { failf("unexpected repository: got %q, expected %q", named.Name(), testcase.repository) } - hostname, _ := SplitHostname(named) - if hostname != testcase.hostname { - failf("unexpected hostname: got %q, expected %q", hostname, testcase.hostname) + domain, _ := SplitHostname(named) + if domain != testcase.domain { + failf("unexpected domain: got %q, expected %q", domain, testcase.domain) } - } else if testcase.repository != "" || testcase.hostname != "" { + } else if testcase.repository != "" || testcase.domain != "" { failf("expected named type, got %T", repo) } @@ -261,39 +282,39 @@ func TestWithNameFailure(t *testing.T) { func TestSplitHostname(t *testing.T) { testcases := []struct { - input string - hostname string - name string + input string + domain string + name string }{ { - input: "test.com/foo", - hostname: "test.com", - name: "foo", + input: "test.com/foo", + domain: "test.com", + name: "foo", }, { - input: "test_com/foo", - hostname: "", - name: "test_com/foo", + input: "test_com/foo", + domain: "", + name: "test_com/foo", }, { - input: "test:8080/foo", - hostname: "test:8080", - name: "foo", + input: "test:8080/foo", + domain: "test:8080", + name: "foo", }, { - input: "test.com:8080/foo", - hostname: "test.com:8080", - name: "foo", + input: "test.com:8080/foo", + domain: "test.com:8080", + name: "foo", }, { - input: "test-com:8080/foo", - hostname: "test-com:8080", - name: "foo", + input: "test-com:8080/foo", + domain: "test-com:8080", + name: "foo", }, { - input: "xn--n3h.com:18080/foo", - hostname: "xn--n3h.com:18080", - name: "foo", + input: "xn--n3h.com:18080/foo", + domain: "xn--n3h.com:18080", + name: "foo", }, } for _, testcase := range testcases { @@ -306,9 +327,9 @@ func TestSplitHostname(t *testing.T) { if err != nil { failf("error parsing name: %s", err) } - hostname, name := SplitHostname(named) - if hostname != testcase.hostname { - failf("unexpected hostname: got %q, expected %q", hostname, testcase.hostname) + domain, name := SplitHostname(named) + if domain != testcase.domain { + failf("unexpected domain: got %q, expected %q", domain, testcase.domain) } if name != testcase.name { failf("unexpected name: got %q, expected %q", name, testcase.name) @@ -448,6 +469,7 @@ func TestSerialization(t *testing.T) { func TestWithTag(t *testing.T) { testcases := []struct { name string + digest digest.Digest tag string combined string }{ @@ -471,6 +493,12 @@ func TestWithTag(t *testing.T) { tag: "TAG5", combined: "test.com:8000/foo:TAG5", }, + { + name: "test.com:8000/foo", + digest: "sha256:1234567890098765432112345667890098765", + tag: "TAG5", + combined: "test.com:8000/foo:TAG5@sha256:1234567890098765432112345667890098765", + }, } for _, testcase := range testcases { failf := func(format string, v ...interface{}) { @@ -482,6 +510,14 @@ func TestWithTag(t *testing.T) { if err != nil { failf("error parsing name: %s", err) } + if testcase.digest != "" { + canonical, err := WithDigest(named, testcase.digest) + if err != nil { + failf("error adding digest") + } + named = canonical + } + tagged, err := WithTag(named, testcase.tag) if err != nil { failf("WithTag failed: %s", err) @@ -496,6 +532,7 @@ func TestWithDigest(t *testing.T) { testcases := []struct { name string digest digest.Digest + tag string combined string }{ { @@ -513,6 +550,12 @@ func TestWithDigest(t *testing.T) { digest: "sha256:1234567890098765432112345667890098765", combined: 
"test.com:8000/foo@sha256:1234567890098765432112345667890098765", }, + { + name: "test.com:8000/foo", + digest: "sha256:1234567890098765432112345667890098765", + tag: "latest", + combined: "test.com:8000/foo:latest@sha256:1234567890098765432112345667890098765", + }, } for _, testcase := range testcases { failf := func(format string, v ...interface{}) { @@ -524,6 +567,13 @@ func TestWithDigest(t *testing.T) { if err != nil { failf("error parsing name: %s", err) } + if testcase.tag != "" { + tagged, err := WithTag(named, testcase.tag) + if err != nil { + failf("error adding tag") + } + named = tagged + } digested, err := WithDigest(named, testcase.digest) if err != nil { failf("WithDigest failed: %s", err) @@ -533,3 +583,77 @@ func TestWithDigest(t *testing.T) { } } } + +func TestParseNamed(t *testing.T) { + testcases := []struct { + input string + domain string + name string + err error + }{ + { + input: "test.com/foo", + domain: "test.com", + name: "foo", + }, + { + input: "test:8080/foo", + domain: "test:8080", + name: "foo", + }, + { + input: "test_com/foo", + err: ErrNameNotCanonical, + }, + { + input: "test.com", + err: ErrNameNotCanonical, + }, + { + input: "foo", + err: ErrNameNotCanonical, + }, + { + input: "library/foo", + err: ErrNameNotCanonical, + }, + { + input: "docker.io/library/foo", + domain: "docker.io", + name: "library/foo", + }, + // Ambiguous case, parser will add "library/" to foo + { + input: "docker.io/foo", + err: ErrNameNotCanonical, + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) + t.Fail() + } + + named, err := ParseNamed(testcase.input) + if err != nil && testcase.err == nil { + failf("error parsing name: %s", err) + continue + } else if err == nil && testcase.err != nil { + failf("parsing succeded: expected error %v", testcase.err) + continue + } else if err != testcase.err { + failf("unexpected error %v, expected %v", err, testcase.err) + continue + } else if err != nil { + continue + } + + domain, name := SplitHostname(named) + if domain != testcase.domain { + failf("unexpected domain: got %q, expected %q", domain, testcase.domain) + } + if name != testcase.name { + failf("unexpected name: got %q, expected %q", name, testcase.name) + } + } +} diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go index 9a7d366bc8a..78603493203 100644 --- a/vendor/github.com/docker/distribution/reference/regexp.go +++ b/vendor/github.com/docker/distribution/reference/regexp.go @@ -19,18 +19,18 @@ var ( alphaNumericRegexp, optional(repeated(separatorRegexp, alphaNumericRegexp))) - // hostnameComponentRegexp restricts the registry hostname component of a - // repository name to start with a component as defined by hostnameRegexp + // domainComponentRegexp restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp // and followed by an optional port. - hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - // hostnameRegexp defines the structure of potential hostname components + // DomainRegexp defines the structure of potential domain components // that may be part of image names. This is purposely a subset of what is // allowed by DNS to ensure backwards compatibility with Docker image // names. 
- hostnameRegexp = expression( - hostnameComponentRegexp, - optional(repeated(literal(`.`), hostnameComponentRegexp)), + DomainRegexp = expression( + domainComponentRegexp, + optional(repeated(literal(`.`), domainComponentRegexp)), optional(literal(`:`), match(`[0-9]+`))) // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. @@ -48,17 +48,17 @@ var ( anchoredDigestRegexp = anchored(DigestRegexp) // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the hostname and name part omitting + // regexp has capturing groups for the domain and name part omitting // the separating forward slash from either. NameRegexp = expression( - optional(hostnameRegexp, literal(`/`)), + optional(DomainRegexp, literal(`/`)), nameComponentRegexp, optional(repeated(literal(`/`), nameComponentRegexp))) // anchoredNameRegexp is used to parse a name value, capturing the - // hostname and trailing components. + // domain and trailing components. anchoredNameRegexp = anchored( - optional(capture(hostnameRegexp), literal(`/`)), + optional(capture(DomainRegexp), literal(`/`)), capture(nameComponentRegexp, optional(repeated(literal(`/`), nameComponentRegexp)))) @@ -68,6 +68,25 @@ var ( ReferenceRegexp = anchored(capture(NameRegexp), optional(literal(":"), capture(TagRegexp)), optional(literal("@"), capture(DigestRegexp))) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = match(`([a-f0-9]{64})`) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) ) // match compiles the string to a regular expression. 
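The renamed regexps above (DomainRegexp, NameRegexp, and the new identifier patterns) drive the parsing behaviour exercised by the reference tests in this patch. Below is a minimal, illustrative sketch of that behaviour against the vendored package; the expected outputs in the comments are copied from the test cases added in this change, and the blank crypto/sha256 import mirrors the one the patch adds to reference_test.go.

package main

import (
	_ "crypto/sha256" // register sha256, matching the blank import the updated tests add
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// A familiar name is expanded to its fully qualified, normalized form.
	named, err := reference.ParseNormalizedNamed("redis:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String())                  // docker.io/library/redis:latest
	fmt.Println(reference.Domain(named))         // docker.io
	fmt.Println(reference.Path(named))           // library/redis
	fmt.Println(reference.FamiliarString(named)) // redis:latest

	// A full 64-character hex string is classified as an identifier rather
	// than a repository name, and comes back as a sha256 digest reference.
	ref, err := reference.ParseAnyReference("dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String()) // sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c
}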
diff --git a/vendor/github.com/docker/distribution/reference/regexp_test.go b/vendor/github.com/docker/distribution/reference/regexp_test.go index 2ec39377af8..09bc819275d 100644 --- a/vendor/github.com/docker/distribution/reference/regexp_test.go +++ b/vendor/github.com/docker/distribution/reference/regexp_test.go @@ -33,7 +33,7 @@ func checkRegexp(t *testing.T, r *regexp.Regexp, m regexpMatch) { } } -func TestHostRegexp(t *testing.T) { +func TestDomainRegexp(t *testing.T) { hostcases := []regexpMatch{ { input: "test.com", @@ -116,7 +116,7 @@ func TestHostRegexp(t *testing.T) { match: true, }, } - r := regexp.MustCompile(`^` + hostnameRegexp.String() + `$`) + r := regexp.MustCompile(`^` + DomainRegexp.String() + `$`) for i := range hostcases { checkRegexp(t, r, hostcases[i]) } @@ -487,3 +487,67 @@ func TestReferenceRegexp(t *testing.T) { } } + +func TestIdentifierRegexp(t *testing.T) { + fullCases := []regexpMatch{ + { + input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", + match: true, + }, + { + input: "7EC43B381E5AEFE6E04EFB0B3F0693FF2A4A50652D64AEC573905F2DB5889A1C", + match: false, + }, + { + input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf", + match: false, + }, + { + input: "sha256:da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", + match: false, + }, + { + input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf98218482", + match: false, + }, + } + + shortCases := []regexpMatch{ + { + input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", + match: true, + }, + { + input: "7EC43B381E5AEFE6E04EFB0B3F0693FF2A4A50652D64AEC573905F2DB5889A1C", + match: false, + }, + { + input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf", + match: true, + }, + { + input: "sha256:da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", + match: false, + }, + { + input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf98218482", + match: false, + }, + { + input: "da304", + match: false, + }, + { + input: "da304e", + match: true, + }, + } + + for i := range fullCases { + checkRegexp(t, anchoredIdentifierRegexp, fullCases[i]) + } + + for i := range shortCases { + checkRegexp(t, anchoredShortIdentifierRegexp, shortCases[i]) + } +} diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go index 1ede31ebb63..1da1d533ff7 100644 --- a/vendor/github.com/docker/distribution/registry.go +++ b/vendor/github.com/docker/distribution/registry.go @@ -35,7 +35,7 @@ type Namespace interface { // reference. Repository(ctx context.Context, name reference.Named) (Repository, error) - // Repositories fills 'repos' with a lexigraphically sorted catalog of repositories + // Repositories fills 'repos' with a lexicographically sorted catalog of repositories // up to the size of 'repos' and returns the value 'n' for the number of entries // which were filled. 'last' contains an offset in the catalog, and 'err' will be // set to io.EOF if there are no more entries to obtain. 
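The registry.go hunk above touches the doc comment that spells out the catalog paging contract for Namespace.Repositories: the caller supplies a slice, gets back the number of entries filled, passes an offset in 'last', and stops on io.EOF. The following is a hedged sketch of a caller following that contract; the Repositories signature comes from the surrounding, unchanged registry.go rather than this hunk, the package name, function name, and page size are made up for illustration, and treating 'last' as the last repository name returned so far is an assumption drawn from the comment.

package registryutil // hypothetical package, used only for this sketch

import (
	"io"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

// allRepositories walks the catalog page by page until the registry
// reports io.EOF, following the contract documented on Repositories.
func allRepositories(ctx context.Context, ns distribution.Namespace) ([]string, error) {
	var (
		all  []string
		last string
		page = make([]string, 100) // arbitrary page size for the sketch
	)
	for {
		n, err := ns.Repositories(ctx, page, last)
		all = append(all, page[:n]...)
		if err == io.EOF {
			return all, nil
		}
		if err != nil {
			return nil, err
		}
		if n == 0 {
			// No progress and no EOF reported; stop rather than loop forever.
			return all, nil
		}
		last = page[n-1]
	}
}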
diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf new file mode 100644 index 00000000000..d67edd779ea --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor.conf @@ -0,0 +1,43 @@ +github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e +github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356 +github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 +github.com/aws/aws-sdk-go c6fc52983ea2375810aa38ddb5370e9cdf611716 +github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a +github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 +github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 +github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 +github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 +github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 +github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85 +github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 +github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 +github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c +github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 +github.com/gorilla/context 14f550f51af52180c2eefed15e5fd18d63c0a64a +github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b +github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d +github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 +github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef +github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6 +github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 +github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 +github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870 +github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 +github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e +github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 +github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 +golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b +golang.org/x/net 4876518f9e71663000c348837735820161a42df7 +golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf +golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb +google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 +google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 +google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 +google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 +gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 +gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b +gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420 +rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git +github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb diff --git a/vendor/github.com/docker/engine-api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go similarity index 100% rename from vendor/github.com/docker/engine-api/types/auth.go rename to vendor/github.com/docker/docker/api/types/auth.go diff --git 
a/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go similarity index 100% rename from vendor/github.com/docker/engine-api/types/blkiodev/blkio.go rename to vendor/github.com/docker/docker/api/types/blkiodev/blkio.go diff --git a/vendor/github.com/docker/engine-api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go similarity index 52% rename from vendor/github.com/docker/engine-api/types/client.go rename to vendor/github.com/docker/docker/api/types/client.go index fa3b2cfb458..7900d64f0de 100644 --- a/vendor/github.com/docker/engine-api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -4,12 +4,31 @@ import ( "bufio" "io" "net" + "os" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/filters" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" "github.com/docker/go-units" ) +// CheckpointCreateOptions holds parameters to create a checkpoint from a container +type CheckpointCreateOptions struct { + CheckpointID string + CheckpointDir string + Exit bool +} + +// CheckpointListOptions holds parameters to list checkpoints for a container +type CheckpointListOptions struct { + CheckpointDir string +} + +// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +type CheckpointDeleteOptions struct { + CheckpointID string + CheckpointDir string +} + // ContainerAttachOptions holds parameters to attach to a container. type ContainerAttachOptions struct { Stream bool @@ -17,6 +36,7 @@ type ContainerAttachOptions struct { Stdout bool Stderr bool DetachKeys string + Logs bool } // ContainerCommitOptions holds parameters to commit changes into a container. @@ -35,18 +55,19 @@ type ContainerExecInspect struct { ContainerID string Running bool ExitCode int + Pid int } // ContainerListOptions holds parameters to list containers with. type ContainerListOptions struct { - Quiet bool - Size bool - All bool - Latest bool - Since string - Before string - Limit int - Filter filters.Args + Quiet bool + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters filters.Args } // ContainerLogsOptions holds parameters to filter logs with. @@ -67,13 +88,19 @@ type ContainerRemoveOptions struct { Force bool } +// ContainerStartOptions holds parameters to start containers. +type ContainerStartOptions struct { + CheckpointID string + CheckpointDir string +} + // CopyToContainerOptions holds information // about files to copy into a container type CopyToContainerOptions struct { AllowOverwriteDirWithFile bool } -// EventsOptions hold parameters to filter events with. +// EventsOptions holds parameters to filter events with. 
type EventsOptions struct { Since string Until string @@ -129,13 +156,25 @@ type ImageBuildOptions struct { Memory int64 MemorySwap int64 CgroupParent string + NetworkMode string ShmSize int64 Dockerfile string Ulimits []*units.Ulimit - BuildArgs map[string]string - AuthConfigs map[string]AuthConfig - Context io.Reader - Labels map[string]string + // See the parsing of buildArgs in api/server/router/build/build_routes.go + // for an explaination of why BuildArgs needs to use *string instead of + // just a string + BuildArgs map[string]*string + AuthConfigs map[string]AuthConfig + Context io.Reader + Labels map[string]string + // squash the resulting image's layers to the parent + // preserves the original image and creates a new one from the parent with all + // the changes applied to a single layer + Squash bool + // CacheFrom specifies images that are used for matching cache. Images + // specified here do not need to have a valid parent chain to match cache. + CacheFrom []string + SecurityOpt []string } // ImageBuildResponse holds information @@ -166,9 +205,8 @@ type ImageImportOptions struct { // ImageListOptions holds parameters to filter the list of images with. type ImageListOptions struct { - MatchName string - All bool - Filters filters.Args + All bool + Filters filters.Args } // ImageLoadResponse returns information to the client about a load process. @@ -207,19 +245,15 @@ type ImageSearchOptions struct { RegistryAuth string PrivilegeFunc RequestPrivilegeFunc Filters filters.Args -} - -// ImageTagOptions holds parameters to tag an image -type ImageTagOptions struct { - Force bool + Limit int } // ResizeOptions holds parameters to resize a tty. // It can be used to resize container ttys and // exec process ttys too. type ResizeOptions struct { - Height int - Width int + Height uint + Width uint } // VersionResponse holds version information for the client and the server @@ -233,3 +267,112 @@ type VersionResponse struct { func (v VersionResponse) ServerOK() bool { return v.Server != nil } + +// NodeListOptions holds parameters to list nodes with. +type NodeListOptions struct { + Filters filters.Args +} + +// NodeRemoveOptions holds parameters to remove nodes with. +type NodeRemoveOptions struct { + Force bool +} + +// ServiceCreateOptions contains the options to use when creating a service. +type ServiceCreateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string +} + +// ServiceCreateResponse contains the information returned to a client +// on the creation of a new service. +type ServiceCreateResponse struct { + // ID is the ID of the created service. + ID string + // Warnings is a set of non-fatal warning messages to pass on to the user. + Warnings []string `json:",omitempty"` +} + +// Values for RegistryAuthFrom in ServiceUpdateOptions +const ( + RegistryAuthFromSpec = "spec" + RegistryAuthFromPreviousSpec = "previous-spec" +) + +// ServiceUpdateOptions contains the options to be used for updating services. +type ServiceUpdateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate + // into this field. 
While it does open API users up to racy writes, most + // users may not need that level of consistency in practice. + + // RegistryAuthFrom specifies where to find the registry authorization + // credentials if they are not given in EncodedRegistryAuth. Valid + // values are "spec" and "previous-spec". + RegistryAuthFrom string +} + +// ServiceListOptions holds parameters to list services with. +type ServiceListOptions struct { + Filters filters.Args +} + +// TaskListOptions holds parameters to list tasks with. +type TaskListOptions struct { + Filters filters.Args +} + +// PluginRemoveOptions holds parameters to remove plugins. +type PluginRemoveOptions struct { + Force bool +} + +// PluginEnableOptions holds parameters to enable plugins. +type PluginEnableOptions struct { + Timeout int +} + +// PluginDisableOptions holds parameters to disable plugins. +type PluginDisableOptions struct { + Force bool +} + +// PluginInstallOptions holds parameters to install a plugin. +type PluginInstallOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + PrivilegeFunc RequestPrivilegeFunc + AcceptPermissionsFunc func(PluginPrivileges) (bool, error) + Args []string +} + +// SecretRequestOption is a type for requesting secrets +type SecretRequestOption struct { + Source string + Target string + UID string + GID string + Mode os.FileMode +} + +// SwarmUnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey +type SwarmUnlockKeyResponse struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// PluginCreateOptions hold all options to plugin create. +type PluginCreateOptions struct { + RepoName string +} diff --git a/vendor/github.com/docker/engine-api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go similarity index 73% rename from vendor/github.com/docker/engine-api/types/configs.go rename to vendor/github.com/docker/docker/api/types/configs.go index 7d4fcb343e3..20c19f21324 100644 --- a/vendor/github.com/docker/engine-api/types/configs.go +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -1,8 +1,8 @@ package types import ( - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/network" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" ) // configs holds structs used for internal communication between the @@ -45,9 +45,25 @@ type ExecConfig struct { Privileged bool // Is the container in privileged mode Tty bool // Attach standard streams to a tty. AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStderr bool // Attach the standard output - AttachStdout bool // Attach the standard error + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output Detach bool // Execute in detach mode DetachKeys string // Escape keys for detach + Env []string // Environment variables Cmd []string // Execution commands and args } + +// PluginRmConfig holds arguments for plugin remove. +type PluginRmConfig struct { + ForceRemove bool +} + +// PluginEnableConfig holds arguments for plugin enable +type PluginEnableConfig struct { + Timeout int +} + +// PluginDisableConfig holds arguments for plugin disable. 
+type PluginDisableConfig struct { + ForceDisable bool +} diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go new file mode 100644 index 00000000000..fc050e5dba9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -0,0 +1,62 @@ +package container + +import ( + "time" + + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. +// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. +type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also support user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the 1 attached client disconnects. + Env []string // List of environment variable to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + Image string // Name of the image as it was passed by the operator (e.g. 
could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile + Labels map[string]string // List of labels set to this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go new file mode 100644 index 00000000000..d028e3b121b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_create.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// ContainerCreateCreatedBody container create created body +// swagger:model ContainerCreateCreatedBody +type ContainerCreateCreatedBody struct { + + // The ID of the created container + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go new file mode 100644 index 00000000000..81ee12c6781 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_update.go @@ -0,0 +1,17 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// ContainerUpdateOKBody container update o k body +// swagger:model ContainerUpdateOKBody +type ContainerUpdateOKBody struct { + + // warnings + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go new file mode 100644 index 00000000000..16cf3353213 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_wait.go @@ -0,0 +1,17 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// ContainerWaitOKBody container wait o k body +// swagger:model ContainerWaitOKBody +type ContainerWaitOKBody struct { + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/vendor/github.com/docker/engine-api/types/container/host_config.go 
b/vendor/github.com/docker/docker/api/types/container/host_config.go similarity index 88% rename from vendor/github.com/docker/engine-api/types/container/host_config.go rename to vendor/github.com/docker/docker/api/types/container/host_config.go index 2446c1904d8..0c82d625e82 100644 --- a/vendor/github.com/docker/engine-api/types/container/host_config.go +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -3,8 +3,9 @@ package container import ( "strings" - "github.com/docker/engine-api/types/blkiodev" - "github.com/docker/engine-api/types/strslice" + "github.com/docker/docker/api/types/blkiodev" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/strslice" "github.com/docker/go-connections/nat" "github.com/docker/go-units" ) @@ -195,7 +196,7 @@ type RestartPolicy struct { // IsNone indicates whether the container has the "no" restart policy. // This means the container will not automatically restart when exiting. func (rp *RestartPolicy) IsNone() bool { - return rp.Name == "no" + return rp.Name == "no" || rp.Name == "" } // IsAlways indicates whether the container has the "always" restart policy. @@ -233,6 +234,7 @@ type Resources struct { // Applicable to all platforms CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) Memory int64 // Memory limit (in bytes) + NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10-9 CPUs. // Applicable to UNIX platforms CgroupParent string // Parent cgroup. @@ -242,8 +244,10 @@ type Resources struct { BlkioDeviceWriteBps []*blkiodev.ThrottleDevice BlkioDeviceReadIOps []*blkiodev.ThrottleDevice BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice - CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period - CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota + CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period + CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota + CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period + CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime CpusetCpus string // CpusetCpus 0-2, 0,1 CpusetMems string // CpusetMems 0-2, 0,1 Devices []DeviceMapping // List of devices to map inside the container @@ -257,11 +261,10 @@ type Resources struct { Ulimits []*units.Ulimit // List of ulimits to be set in the container // Applicable to Windows - CPUCount int64 `json:"CpuCount"` // CPU count - CPUPercent int64 `json:"CpuPercent"` // CPU percent - IOMaximumIOps uint64 // Maximum IOps for the container system drive - IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive - NetworkMaximumBandwidth uint64 // Maximum bandwidth of the network endpoint in bytes per second + CPUCount int64 `json:"CpuCount"` // CPU count + CPUPercent int64 `json:"CpuPercent"` // CPU percent + IOMaximumIOps uint64 // Maximum IOps for the container system drive + IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive } // UpdateConfig holds the mutable attributes of a Container. @@ -304,17 +307,27 @@ type HostConfig struct { PublishAllPorts bool // Should docker publish all exposed port for the container ReadonlyRootfs bool // Is the container root filesystem in read-only SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. - StorageOpt map[string]string // Storage driver options per container. 
+ StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container UTSMode UTSMode // UTS namespace to use for the container UsernsMode UsernsMode // The user namespace to use for the container ShmSize int64 // Total shm memory usage Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container + Runtime string `json:",omitempty"` // Runtime to use with this container // Applicable to Windows - ConsoleSize [2]int // Initial console size + ConsoleSize [2]uint // Initial console size (height,width) Isolation Isolation // Isolation technology of the container (eg default, hyperv) // Contains container's resources (cgroups, ulimits) Resources + + // Mounts specs used by the container + Mounts []mount.Mount `json:",omitempty"` + + // Run a custom init inside the container, if null, use the daemon's configured settings + Init *bool `json:",omitempty"` + + // Custom init path + InitPath string `json:",omitempty"` } diff --git a/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go similarity index 96% rename from vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go rename to vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go index 4171059a476..9fb79bed6f3 100644 --- a/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go @@ -9,7 +9,7 @@ func (i Isolation) IsValid() bool { return i.IsDefault() } -// IsPrivate indicates whether container uses it's private network stack. +// IsPrivate indicates whether container uses its private network stack. func (n NetworkMode) IsPrivate() bool { return !(n.IsHost() || n.IsContainer()) } diff --git a/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go similarity index 100% rename from vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go rename to vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go new file mode 100644 index 00000000000..dc942d9d9ef --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/error_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ErrorResponse Represents an error. +// swagger:model ErrorResponse +type ErrorResponse struct { + + // The error message. 
+ // Required: true + Message string `json:"message"` +} diff --git a/vendor/github.com/docker/engine-api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go similarity index 92% rename from vendor/github.com/docker/engine-api/types/filters/parse.go rename to vendor/github.com/docker/docker/api/types/filters/parse.go index 0e0d7e38054..e01a41deb81 100644 --- a/vendor/github.com/docker/engine-api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -9,7 +9,7 @@ import ( "regexp" "strings" - "github.com/docker/engine-api/types/versions" + "github.com/docker/docker/api/types/versions" ) // Args stores filter arguments as map key:{map key: bool}. @@ -144,6 +144,9 @@ func (filters Args) Add(name, value string) { func (filters Args) Del(name, value string) { if _, ok := filters.fields[name]; ok { delete(filters.fields[name], value) + if len(filters.fields[name]) == 0 { + delete(filters.fields, name) + } } } @@ -165,7 +168,7 @@ func (filters Args) MatchKVList(field string, sources map[string]string) bool { return true } - if sources == nil || len(sources) == 0 { + if len(sources) == 0 { return false } @@ -215,10 +218,22 @@ func (filters Args) ExactMatch(field, source string) bool { } // try to match full name value to avoid O(N) regular expression matching - if fieldValues[source] { + return fieldValues[source] +} + +// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one. +func (filters Args) UniqueExactMatch(field, source string) bool { + fieldValues := filters.fields[field] + //do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { return true } - return false + if len(filters.fields[field]) != 1 { + return false + } + + // try to match full name value to avoid O(N) regular expression matching + return fieldValues[source] } // FuzzyMatch returns true if the source matches exactly one of the filters, diff --git a/vendor/github.com/docker/engine-api/types/filters/parse_test.go b/vendor/github.com/docker/docker/api/types/filters/parse_test.go similarity index 93% rename from vendor/github.com/docker/engine-api/types/filters/parse_test.go rename to vendor/github.com/docker/docker/api/types/filters/parse_test.go index 037783b9101..b2ed27b9cee 100644 --- a/vendor/github.com/docker/engine-api/types/filters/parse_test.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse_test.go @@ -318,6 +318,29 @@ func TestExactMatch(t *testing.T) { } } +func TestOnlyOneExactMatch(t *testing.T) { + f := NewArgs() + + if !f.UniqueExactMatch("status", "running") { + t.Fatalf("Expected to match `running` when there are no filters, got false") + } + + f.Add("status", "running") + + if !f.UniqueExactMatch("status", "running") { + t.Fatalf("Expected to match `running` with one of the filters, got false") + } + + if f.UniqueExactMatch("status", "paused") { + t.Fatalf("Expected to not match `paused` with one of the filters, got true") + } + + f.Add("status", "pause") + if f.UniqueExactMatch("status", "running") { + t.Fatalf("Expected to not match only `running` with two filters, got true") + } +} + func TestInclude(t *testing.T) { f := NewArgs() if f.Include("status") { diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go new file mode 100644 index 00000000000..7592d2f8b15 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/id_response.go @@ -0,0 +1,13 @@ +package types + +// 
This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// IDResponse Response to an API call that returns just an Id +// swagger:model IdResponse +type IDResponse struct { + + // The id of the newly created object. + // Required: true + ID string `json:"Id"` +} diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go new file mode 100644 index 00000000000..e145b3dcfcd --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image_summary.go @@ -0,0 +1,49 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageSummary image summary +// swagger:model ImageSummary +type ImageSummary struct { + + // containers + // Required: true + Containers int64 `json:"Containers"` + + // created + // Required: true + Created int64 `json:"Created"` + + // Id + // Required: true + ID string `json:"Id"` + + // labels + // Required: true + Labels map[string]string `json:"Labels"` + + // parent Id + // Required: true + ParentID string `json:"ParentId"` + + // repo digests + // Required: true + RepoDigests []string `json:"RepoDigests"` + + // repo tags + // Required: true + RepoTags []string `json:"RepoTags"` + + // shared size + // Required: true + SharedSize int64 `json:"SharedSize"` + + // size + // Required: true + Size int64 `json:"Size"` + + // virtual size + // Required: true + VirtualSize int64 `json:"VirtualSize"` +} diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go new file mode 100644 index 00000000000..31f2365b8ec --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -0,0 +1,113 @@ +package mount + +import ( + "os" +) + +// Type represents the type of a mount. +type Type string + +// Type constants +const ( + // TypeBind is the type for mounting host dir + TypeBind Type = "bind" + // TypeVolume is the type for remote storage volumes + TypeVolume Type = "volume" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs Type = "tmpfs" +) + +// Mount represents a mount (volume). +type Mount struct { + Type Type `json:",omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + // Source is not supported for tmpfs (must be an empty value) + Source string `json:",omitempty"` + Target string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` +} + +// Propagation represents the propagation of a mount. 
+type Propagation string + +const ( + // PropagationRPrivate RPRIVATE + PropagationRPrivate Propagation = "rprivate" + // PropagationPrivate PRIVATE + PropagationPrivate Propagation = "private" + // PropagationRShared RSHARED + PropagationRShared Propagation = "rshared" + // PropagationShared SHARED + PropagationShared Propagation = "shared" + // PropagationRSlave RSLAVE + PropagationRSlave Propagation = "rslave" + // PropagationSlave SLAVE + PropagationSlave Propagation = "slave" +) + +// Propagations is the list of all valid mount propagations +var Propagations = []Propagation{ + PropagationRPrivate, + PropagationPrivate, + PropagationRShared, + PropagationShared, + PropagationRSlave, + PropagationSlave, +} + +// BindOptions defines options specific to mounts of type "bind". +type BindOptions struct { + Propagation Propagation `json:",omitempty"` +} + +// VolumeOptions represents the options for a mount of type volume. +type VolumeOptions struct { + NoCopy bool `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + DriverConfig *Driver `json:",omitempty"` +} + +// Driver represents a volume driver. +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TmpfsOptions defines options specific to mounts of type "tmpfs". +type TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be convered to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. + SizeBytes int64 `json:",omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `json:",omitempty"` + + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. Only the most basic are added for now. + // + // From docker/docker/pkg/mount/flags.go: + // + // var validFlags = map[string]bool{ + // "": true, + // "size": true, X + // "mode": true, X + // "uid": true, + // "gid": true, + // "nr_inodes": true, + // "nr_blocks": true, + // "mpol": true, + // } + // + // Some of these may be straightforward to add, but others, such as + // uid/gid have implications in a clustered system. 
+} diff --git a/vendor/github.com/docker/engine-api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go similarity index 86% rename from vendor/github.com/docker/engine-api/types/network/network.go rename to vendor/github.com/docker/docker/api/types/network/network.go index bce60f5eec4..832b3edb9fa 100644 --- a/vendor/github.com/docker/engine-api/types/network/network.go +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -23,8 +23,15 @@ type IPAMConfig struct { // EndpointIPAMConfig represents IPAM configurations for the endpoint type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` + LinkLocalIPs []string `json:",omitempty"` +} + +// PeerInfo represents one peer of a overlay network +type PeerInfo struct { + Name string + IP string } // EndpointSettings stores the network endpoint details diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go new file mode 100644 index 00000000000..6cc7a23b02a --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin.go @@ -0,0 +1,189 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Plugin A plugin for the Engine API +// swagger:model Plugin +type Plugin struct { + + // config + // Required: true + Config PluginConfig `json:"Config"` + + // True when the plugin is running. False when the plugin is not running, only installed. + // Required: true + Enabled bool `json:"Enabled"` + + // Id + ID string `json:"Id,omitempty"` + + // name + // Required: true + Name string `json:"Name"` + + // plugin remote reference used to push/pull the plugin + PluginReference string `json:"PluginReference,omitempty"` + + // settings + // Required: true + Settings PluginSettings `json:"Settings"` +} + +// PluginConfig The config of a plugin. 
+// swagger:model PluginConfig +type PluginConfig struct { + + // args + // Required: true + Args PluginConfigArgs `json:"Args"` + + // description + // Required: true + Description string `json:"Description"` + + // documentation + // Required: true + Documentation string `json:"Documentation"` + + // entrypoint + // Required: true + Entrypoint []string `json:"Entrypoint"` + + // env + // Required: true + Env []PluginEnv `json:"Env"` + + // interface + // Required: true + Interface PluginConfigInterface `json:"Interface"` + + // linux + // Required: true + Linux PluginConfigLinux `json:"Linux"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` + + // network + // Required: true + Network PluginConfigNetwork `json:"Network"` + + // propagated mount + // Required: true + PropagatedMount string `json:"PropagatedMount"` + + // user + User PluginConfigUser `json:"User,omitempty"` + + // work dir + // Required: true + WorkDir string `json:"WorkDir"` + + // rootfs + Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` +} + +// PluginConfigArgs plugin config args +// swagger:model PluginConfigArgs +type PluginConfigArgs struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value []string `json:"Value"` +} + +// PluginConfigInterface The interface between Docker and the plugin +// swagger:model PluginConfigInterface +type PluginConfigInterface struct { + + // socket + // Required: true + Socket string `json:"Socket"` + + // types + // Required: true + Types []PluginInterfaceType `json:"Types"` +} + +// PluginConfigLinux plugin config linux +// swagger:model PluginConfigLinux +type PluginConfigLinux struct { + + // allow all devices + // Required: true + AllowAllDevices bool `json:"AllowAllDevices"` + + // capabilities + // Required: true + Capabilities []string `json:"Capabilities"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` +} + +// PluginConfigNetwork plugin config network +// swagger:model PluginConfigNetwork +type PluginConfigNetwork struct { + + // type + // Required: true + Type string `json:"Type"` +} + +// PluginConfigRootfs plugin config rootfs +// swagger:model PluginConfigRootfs +type PluginConfigRootfs struct { + + // diff ids + DiffIds []string `json:"diff_ids"` + + // type + Type string `json:"type,omitempty"` +} + +// PluginConfigUser plugin config user +// swagger:model PluginConfigUser +type PluginConfigUser struct { + + // g ID + GID uint32 `json:"GID,omitempty"` + + // UID + UID uint32 `json:"UID,omitempty"` +} + +// PluginSettings Settings that can be modified by users. +// swagger:model PluginSettings +type PluginSettings struct { + + // args + // Required: true + Args []string `json:"Args"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` + + // env + // Required: true + Env []string `json:"Env"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go new file mode 100644 index 00000000000..56990106755 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_device.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// PluginDevice plugin device +// swagger:model PluginDevice +type PluginDevice struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // path + // Required: true + Path *string `json:"Path"` + + // settable + // Required: true + Settable []string `json:"Settable"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go new file mode 100644 index 00000000000..32962dc2ebe --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_env.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginEnv plugin env +// swagger:model PluginEnv +type PluginEnv struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value *string `json:"Value"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go new file mode 100644 index 00000000000..c82f204e870 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go @@ -0,0 +1,21 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginInterfaceType plugin interface type +// swagger:model PluginInterfaceType +type PluginInterfaceType struct { + + // capability + // Required: true + Capability string `json:"Capability"` + + // prefix + // Required: true + Prefix string `json:"Prefix"` + + // version + // Required: true + Version string `json:"Version"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go new file mode 100644 index 00000000000..5c031cf8b5c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_mount.go @@ -0,0 +1,37 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// PluginMount plugin mount +// swagger:model PluginMount +type PluginMount struct { + + // description + // Required: true + Description string `json:"Description"` + + // destination + // Required: true + Destination string `json:"Destination"` + + // name + // Required: true + Name string `json:"Name"` + + // options + // Required: true + Options []string `json:"Options"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // source + // Required: true + Source *string `json:"Source"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go new file mode 100644 index 00000000000..d6f75531196 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_responses.go @@ -0,0 +1,64 @@ +package types + +import ( + "encoding/json" + "fmt" +) + +// PluginsListResponse contains the response for the Engine API +type PluginsListResponse []*Plugin + +const ( + authzDriver = "AuthzDriver" + graphDriver = "GraphDriver" + ipamDriver = "IpamDriver" + networkDriver = "NetworkDriver" + volumeDriver = "VolumeDriver" +) + +// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType +func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { + versionIndex := len(p) + prefixIndex := 0 + if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { + return fmt.Errorf("%q is not a plugin interface type", p) + } + p = p[1 : len(p)-1] +loop: + for i, b := range p { + switch b { + case '.': + prefixIndex = i + case '/': + versionIndex = i + break loop + } + } + t.Prefix = string(p[:prefixIndex]) + t.Capability = string(p[prefixIndex+1 : versionIndex]) + if versionIndex < len(p) { + t.Version = string(p[versionIndex+1:]) + } + return nil +} + +// MarshalJSON implements json.Marshaler for PluginInterfaceType +func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String implements fmt.Stringer for PluginInterfaceType +func (t PluginInterfaceType) String() string { + return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string + Description string + Value []string +} + +// PluginPrivileges is a list of PluginPrivilege +type PluginPrivileges []PluginPrivilege diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go new file mode 100644 index 00000000000..ad52d46d560 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/port.go @@ -0,0 +1,23 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// Port An open port on a container +// swagger:model Port +type Port struct { + + // IP + IP string `json:"IP,omitempty"` + + // Port on the container + // Required: true + PrivatePort uint16 `json:"PrivatePort"` + + // Port exposed on the host + PublicPort uint16 `json:"PublicPort,omitempty"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go new file mode 100644 index 00000000000..5e37d19bd48 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/authenticate.go @@ -0,0 +1,21 @@ +package registry + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// AuthenticateOKBody authenticate o k body +// swagger:model AuthenticateOKBody +type AuthenticateOKBody struct { + + // An opaque token used to authenticate a user after a successful login + // Required: true + IdentityToken string `json:"IdentityToken"` + + // The status of the authentication + // Required: true + Status string `json:"Status"` +} diff --git a/vendor/github.com/docker/engine-api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go similarity index 96% rename from vendor/github.com/docker/engine-api/types/registry/registry.go rename to vendor/github.com/docker/docker/api/types/registry/registry.go index d2aca6f024b..28fafab901b 100644 --- a/vendor/github.com/docker/engine-api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -16,6 +16,11 @@ type ServiceConfig struct { // unmarshalled to JSON type NetIPNet net.IPNet +// String returns the CIDR notation of ipnet +func (ipnet *NetIPNet) String() string { + return (*net.IPNet)(ipnet).String() +} + // MarshalJSON returns the JSON representation of the IPNet func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { return json.Marshal((*net.IPNet)(ipnet).String()) diff --git a/vendor/github.com/docker/engine-api/types/seccomp.go b/vendor/github.com/docker/docker/api/types/seccomp.go similarity index 58% rename from vendor/github.com/docker/engine-api/types/seccomp.go rename to vendor/github.com/docker/docker/api/types/seccomp.go index e0305a9e378..4f02ef36b8e 100644 --- a/vendor/github.com/docker/engine-api/types/seccomp.go +++ b/vendor/github.com/docker/docker/api/types/seccomp.go @@ -2,12 +2,22 @@ package types // Seccomp represents the config for a seccomp profile for syscall restriction. type Seccomp struct { - DefaultAction Action `json:"defaultAction"` - Architectures []Arch `json:"architectures"` - Syscalls []*Syscall `json:"syscalls"` + DefaultAction Action `json:"defaultAction"` + // Architectures is kept to maintain backward compatibility with the old + // seccomp profile. 
+ Architectures []Arch `json:"architectures,omitempty"` + ArchMap []Architecture `json:"archMap,omitempty"` + Syscalls []*Syscall `json:"syscalls"` } -// Arch used for additional architectures +// Architecture is used to represent an specific architecture +// and its sub-architectures +type Architecture struct { + Arch Arch `json:"architecture"` + SubArches []Arch `json:"subArchitectures"` +} + +// Arch used for architectures type Arch string // Additional architectures permitted to be used for system calls @@ -24,6 +34,11 @@ const ( ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" + ArchPPC Arch = "SCMP_ARCH_PPC" + ArchPPC64 Arch = "SCMP_ARCH_PPC64" + ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE" + ArchS390 Arch = "SCMP_ARCH_S390" + ArchS390X Arch = "SCMP_ARCH_S390X" ) // Action taken upon Seccomp rule match @@ -60,9 +75,19 @@ type Arg struct { Op Operator `json:"op"` } -// Syscall is used to match a syscall in Seccomp +// Filter is used to conditionally apply Seccomp rules +type Filter struct { + Caps []string `json:"caps,omitempty"` + Arches []string `json:"arches,omitempty"` +} + +// Syscall is used to match a group of syscalls in Seccomp type Syscall struct { - Name string `json:"name"` - Action Action `json:"action"` - Args []*Arg `json:"args"` + Name string `json:"name,omitempty"` + Names []string `json:"names,omitempty"` + Action Action `json:"action"` + Args []*Arg `json:"args"` + Comment string `json:"comment"` + Includes Filter `json:"includes"` + Excludes Filter `json:"excludes"` } diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/service_update_response.go new file mode 100644 index 00000000000..74ea64b1bb6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/service_update_response.go @@ -0,0 +1,12 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ServiceUpdateResponse service update response +// swagger:model ServiceUpdateResponse +type ServiceUpdateResponse struct { + + // Optional warning messages + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go new file mode 100644 index 00000000000..9bf1928b8c9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/stats.go @@ -0,0 +1,178 @@ +// Package types is used for API stability in the types and response to the +// consumers of the API stats endpoint. +package types + +import "time" + +// ThrottlingData stores CPU throttling stats of one running container. +// Not used on Windows. +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods"` + // Number of periods when the container hits its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time"` +} + +// CPUUsage stores All CPU stats aggregated since container inception. +type CPUUsage struct { + // Total CPU time consumed. + // Units: nanoseconds (Linux) + // Units: 100's of nanoseconds (Windows) + TotalUsage uint64 `json:"total_usage"` + + // Total CPU time consumed per core (Linux). Not used on Windows. + // Units: nanoseconds. 
+ PercpuUsage []uint64 `json:"percpu_usage,omitempty"` + + // Time spent by tasks of the cgroup in kernel mode (Linux). + // Time spent by all container processes in kernel mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + + // Time spent by tasks of the cgroup in user mode (Linux). + // Time spent by all container processes in user mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +// CPUStats aggregates and wraps all CPU related info of container +type CPUStats struct { + // CPU Usage. Linux and Windows. + CPUUsage CPUUsage `json:"cpu_usage"` + + // System Usage. Linux only. + SystemUsage uint64 `json:"system_cpu_usage,omitempty"` + + // Throttling Data. Linux only. + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +// MemoryStats aggregates all memory stats since container inception on Linux. +// Windows returns stats for commit and private working set only. +type MemoryStats struct { + // Linux Memory Stats + + // current res_counter usage for memory + Usage uint64 `json:"usage,omitempty"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage,omitempty"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]uint64 `json:"stats,omitempty"` + // number of times memory usage hits limits. + Failcnt uint64 `json:"failcnt,omitempty"` + Limit uint64 `json:"limit,omitempty"` + + // Windows Memory Stats + // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx + + // committed bytes + Commit uint64 `json:"commitbytes,omitempty"` + // peak committed bytes + CommitPeak uint64 `json:"commitpeakbytes,omitempty"` + // private working set + PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` +} + +// BlkioStatEntry is one small entity to store a piece of Blkio stats +// Not used on Windows. +type BlkioStatEntry struct { + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` + Op string `json:"op"` + Value uint64 `json:"value"` +} + +// BlkioStats stores All IO service stats for data read and write. +// This is a Linux specific structure as the differences between expressing +// block I/O on Windows and Linux are sufficiently significant to make +// little sense attempting to morph into a combined structure. +type BlkioStats struct { + // number of bytes transferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` +} + +// StorageStats is the disk I/O stats for read/write on Windows. 
+type StorageStats struct { + ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` + ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` + WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` + WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` +} + +// NetworkStats aggregates the network stats of one container +type NetworkStats struct { + // Bytes received. Windows and Linux. + RxBytes uint64 `json:"rx_bytes"` + // Packets received. Windows and Linux. + RxPackets uint64 `json:"rx_packets"` + // Received errors. Not used on Windows. Note that we dont `omitempty` this + // field as it is expected in the >=v1.21 API stats structure. + RxErrors uint64 `json:"rx_errors"` + // Incoming packets dropped. Windows and Linux. + RxDropped uint64 `json:"rx_dropped"` + // Bytes sent. Windows and Linux. + TxBytes uint64 `json:"tx_bytes"` + // Packets sent. Windows and Linux. + TxPackets uint64 `json:"tx_packets"` + // Sent errors. Not used on Windows. Note that we dont `omitempty` this + // field as it is expected in the >=v1.21 API stats structure. + TxErrors uint64 `json:"tx_errors"` + // Outgoing packets dropped. Windows and Linux. + TxDropped uint64 `json:"tx_dropped"` + // Endpoint ID. Not used on Linux. + EndpointID string `json:"endpoint_id,omitempty"` + // Instance ID. Not used on Linux. + InstanceID string `json:"instance_id,omitempty"` +} + +// PidsStats contains the stats of a container's pids +type PidsStats struct { + // Current is the number of pids in the cgroup + Current uint64 `json:"current,omitempty"` + // Limit is the hard limit on the number of pids in the cgroup. + // A "Limit" of 0 means that there is no limit. + Limit uint64 `json:"limit,omitempty"` +} + +// Stats is Ultimate struct aggregating all types of stats of one container +type Stats struct { + // Common stats + Read time.Time `json:"read"` + PreRead time.Time `json:"preread"` + + // Linux specific stats, not populated on Windows. + PidsStats PidsStats `json:"pids_stats,omitempty"` + BlkioStats BlkioStats `json:"blkio_stats,omitempty"` + + // Windows specific stats, not populated on Linux. 
+ NumProcs uint32 `json:"num_procs"` + StorageStats StorageStats `json:"storage_stats,omitempty"` + + // Shared stats + CPUStats CPUStats `json:"cpu_stats,omitempty"` + PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" + MemoryStats MemoryStats `json:"memory_stats,omitempty"` +} + +// StatsJSON is newly used Networks +type StatsJSON struct { + Stats + + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + + // Networks request version >=1.21 + Networks map[string]NetworkStats `json:"networks,omitempty"` +} diff --git a/vendor/github.com/docker/engine-api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go similarity index 100% rename from vendor/github.com/docker/engine-api/types/strslice/strslice.go rename to vendor/github.com/docker/docker/api/types/strslice/strslice.go diff --git a/vendor/github.com/docker/engine-api/types/strslice/strslice_test.go b/vendor/github.com/docker/docker/api/types/strslice/strslice_test.go similarity index 100% rename from vendor/github.com/docker/engine-api/types/strslice/strslice_test.go rename to vendor/github.com/docker/docker/api/types/strslice/strslice_test.go diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go new file mode 100644 index 00000000000..64a648bad15 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/common.go @@ -0,0 +1,27 @@ +package swarm + +import "time" + +// Version represents the internal object version. +type Version struct { + Index uint64 `json:",omitempty"` +} + +// Meta is a base object inherited by most of the other once. +type Meta struct { + Version Version `json:",omitempty"` + CreatedAt time.Time `json:",omitempty"` + UpdatedAt time.Time `json:",omitempty"` +} + +// Annotations represents how to describe an object. +type Annotations struct { + Name string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` +} + +// Driver represents a driver (network, logging). +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go new file mode 100644 index 00000000000..4ab476ccc39 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -0,0 +1,46 @@ +package swarm + +import ( + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" +) + +// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) +// Detailed documentation is available in: +// http://man7.org/linux/man-pages/man5/resolv.conf.5.html +// `nameserver`, `search`, `options` have been supported. +// TODO: `domain` is not supported yet. +type DNSConfig struct { + // Nameservers specifies the IP addresses of the name servers + Nameservers []string `json:",omitempty"` + // Search specifies the search list for host-name lookup + Search []string `json:",omitempty"` + // Options allows certain internal resolver variables to be modified + Options []string `json:",omitempty"` +} + +// ContainerSpec represents the spec of a container. 
+type ContainerSpec struct { + Image string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Command []string `json:",omitempty"` + Args []string `json:",omitempty"` + Hostname string `json:",omitempty"` + Env []string `json:",omitempty"` + Dir string `json:",omitempty"` + User string `json:",omitempty"` + Groups []string `json:",omitempty"` + TTY bool `json:",omitempty"` + OpenStdin bool `json:",omitempty"` + Mounts []mount.Mount `json:",omitempty"` + StopGracePeriod *time.Duration `json:",omitempty"` + Healthcheck *container.HealthConfig `json:",omitempty"` + // The format of extra hosts on swarmkit is specified in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] + Hosts []string `json:",omitempty"` + DNSConfig *DNSConfig `json:",omitempty"` + Secrets []*SecretReference `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go new file mode 100644 index 00000000000..5a5e11bdba5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/network.go @@ -0,0 +1,111 @@ +package swarm + +// Endpoint represents an endpoint. +type Endpoint struct { + Spec EndpointSpec `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` + VirtualIPs []EndpointVirtualIP `json:",omitempty"` +} + +// EndpointSpec represents the spec of an endpoint. +type EndpointSpec struct { + Mode ResolutionMode `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` +} + +// ResolutionMode represents a resolution mode. +type ResolutionMode string + +const ( + // ResolutionModeVIP VIP + ResolutionModeVIP ResolutionMode = "vip" + // ResolutionModeDNSRR DNSRR + ResolutionModeDNSRR ResolutionMode = "dnsrr" +) + +// PortConfig represents the config of a port. +type PortConfig struct { + Name string `json:",omitempty"` + Protocol PortConfigProtocol `json:",omitempty"` + // TargetPort is the port inside the container + TargetPort uint32 `json:",omitempty"` + // PublishedPort is the port on the swarm hosts + PublishedPort uint32 `json:",omitempty"` + // PublishMode is the mode in which port is published + PublishMode PortConfigPublishMode `json:",omitempty"` +} + +// PortConfigPublishMode represents the mode in which the port is to +// be published. +type PortConfigPublishMode string + +const ( + // PortConfigPublishModeIngress is used for ports published + // for ingress load balancing using routing mesh. + PortConfigPublishModeIngress PortConfigPublishMode = "ingress" + // PortConfigPublishModeHost is used for ports published + // for direct host level access on the host where the task is running. + PortConfigPublishModeHost PortConfigPublishMode = "host" +) + +// PortConfigProtocol represents the protocol of a port. +type PortConfigProtocol string + +const ( + // TODO(stevvooe): These should be used generally, not just for PortConfig. + + // PortConfigProtocolTCP TCP + PortConfigProtocolTCP PortConfigProtocol = "tcp" + // PortConfigProtocolUDP UDP + PortConfigProtocolUDP PortConfigProtocol = "udp" +) + +// EndpointVirtualIP represents the virtual ip of a port. +type EndpointVirtualIP struct { + NetworkID string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Network represents a network. +type Network struct { + ID string + Meta + Spec NetworkSpec `json:",omitempty"` + DriverState Driver `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkSpec represents the spec of a network. 
+type NetworkSpec struct { + Annotations + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkAttachmentConfig represents the configuration of a network attachment. +type NetworkAttachmentConfig struct { + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` +} + +// NetworkAttachment represents a network attachment. +type NetworkAttachment struct { + Network Network `json:",omitempty"` + Addresses []string `json:",omitempty"` +} + +// IPAMOptions represents ipam options. +type IPAMOptions struct { + Driver Driver `json:",omitempty"` + Configs []IPAMConfig `json:",omitempty"` +} + +// IPAMConfig represents ipam configuration. +type IPAMConfig struct { + Subnet string `json:",omitempty"` + Range string `json:",omitempty"` + Gateway string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go new file mode 100644 index 00000000000..379e17a779a --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -0,0 +1,114 @@ +package swarm + +// Node represents a node. +type Node struct { + ID string + Meta + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `json:",omitempty"` + // Description encapsulates the properties of the Node as reported by the + // agent. + Description NodeDescription `json:",omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `json:",omitempty"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `json:",omitempty"` +} + +// NodeSpec represents the spec of a node. +type NodeSpec struct { + Annotations + Role NodeRole `json:",omitempty"` + Availability NodeAvailability `json:",omitempty"` +} + +// NodeRole represents the role of a node. +type NodeRole string + +const ( + // NodeRoleWorker WORKER + NodeRoleWorker NodeRole = "worker" + // NodeRoleManager MANAGER + NodeRoleManager NodeRole = "manager" +) + +// NodeAvailability represents the availability of a node. +type NodeAvailability string + +const ( + // NodeAvailabilityActive ACTIVE + NodeAvailabilityActive NodeAvailability = "active" + // NodeAvailabilityPause PAUSE + NodeAvailabilityPause NodeAvailability = "pause" + // NodeAvailabilityDrain DRAIN + NodeAvailabilityDrain NodeAvailability = "drain" +) + +// NodeDescription represents the description of a node. +type NodeDescription struct { + Hostname string `json:",omitempty"` + Platform Platform `json:",omitempty"` + Resources Resources `json:",omitempty"` + Engine EngineDescription `json:",omitempty"` +} + +// Platform represents the platform (Arch/OS). +type Platform struct { + Architecture string `json:",omitempty"` + OS string `json:",omitempty"` +} + +// EngineDescription represents the description of an engine. +type EngineDescription struct { + EngineVersion string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Plugins []PluginDescription `json:",omitempty"` +} + +// PluginDescription represents the description of an engine plugin. +type PluginDescription struct { + Type string `json:",omitempty"` + Name string `json:",omitempty"` +} + +// NodeStatus represents the status of a node. 
+type NodeStatus struct { + State NodeState `json:",omitempty"` + Message string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Reachability represents the reachability of a node. +type Reachability string + +const ( + // ReachabilityUnknown UNKNOWN + ReachabilityUnknown Reachability = "unknown" + // ReachabilityUnreachable UNREACHABLE + ReachabilityUnreachable Reachability = "unreachable" + // ReachabilityReachable REACHABLE + ReachabilityReachable Reachability = "reachable" +) + +// ManagerStatus represents the status of a manager. +type ManagerStatus struct { + Leader bool `json:",omitempty"` + Reachability Reachability `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// NodeState represents the state of a node. +type NodeState string + +const ( + // NodeStateUnknown UNKNOWN + NodeStateUnknown NodeState = "unknown" + // NodeStateDown DOWN + NodeStateDown NodeState = "down" + // NodeStateReady READY + NodeStateReady NodeState = "ready" + // NodeStateDisconnected DISCONNECTED + NodeStateDisconnected NodeState = "disconnected" +) diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go new file mode 100644 index 00000000000..fdb2388888d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -0,0 +1,31 @@ +package swarm + +import "os" + +// Secret represents a secret. +type Secret struct { + ID string + Meta + Spec SecretSpec +} + +// SecretSpec represents a secret specification from a secret in swarm +type SecretSpec struct { + Annotations + Data []byte `json:",omitempty"` +} + +// SecretReferenceFileTarget is a file target in a secret reference +type SecretReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// SecretReference is a reference to a secret in swarm +type SecretReference struct { + File *SecretReferenceFileTarget + SecretID string + SecretName string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go new file mode 100644 index 00000000000..2cf2642c1fa --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/service.go @@ -0,0 +1,105 @@ +package swarm + +import "time" + +// Service represents a service. +type Service struct { + ID string + Meta + Spec ServiceSpec `json:",omitempty"` + PreviousSpec *ServiceSpec `json:",omitempty"` + Endpoint Endpoint `json:",omitempty"` + UpdateStatus UpdateStatus `json:",omitempty"` +} + +// ServiceSpec represents the spec of a service. +type ServiceSpec struct { + Annotations + + // TaskTemplate defines how the service should construct new tasks when + // orchestrating this service. + TaskTemplate TaskSpec `json:",omitempty"` + Mode ServiceMode `json:",omitempty"` + UpdateConfig *UpdateConfig `json:",omitempty"` + + // Networks field in ServiceSpec is deprecated. The + // same field in TaskSpec should be used instead. + // This field will be removed in a future release. + Networks []NetworkAttachmentConfig `json:",omitempty"` + EndpointSpec *EndpointSpec `json:",omitempty"` +} + +// ServiceMode represents the mode of a service. +type ServiceMode struct { + Replicated *ReplicatedService `json:",omitempty"` + Global *GlobalService `json:",omitempty"` +} + +// UpdateState is the state of a service update. +type UpdateState string + +const ( + // UpdateStateUpdating is the updating state. + UpdateStateUpdating UpdateState = "updating" + // UpdateStatePaused is the paused state. 
+ UpdateStatePaused UpdateState = "paused" + // UpdateStateCompleted is the completed state. + UpdateStateCompleted UpdateState = "completed" +) + +// UpdateStatus reports the status of a service update. +type UpdateStatus struct { + State UpdateState `json:",omitempty"` + StartedAt time.Time `json:",omitempty"` + CompletedAt time.Time `json:",omitempty"` + Message string `json:",omitempty"` +} + +// ReplicatedService is a kind of ServiceMode. +type ReplicatedService struct { + Replicas *uint64 `json:",omitempty"` +} + +// GlobalService is a kind of ServiceMode. +type GlobalService struct{} + +const ( + // UpdateFailureActionPause PAUSE + UpdateFailureActionPause = "pause" + // UpdateFailureActionContinue CONTINUE + UpdateFailureActionContinue = "continue" +) + +// UpdateConfig represents the update configuration. +type UpdateConfig struct { + // Maximum number of tasks to be updated in one iteration. + // 0 means unlimited parallelism. + Parallelism uint64 + + // Amount of time between updates. + Delay time.Duration `json:",omitempty"` + + // FailureAction is the action to take when an update failures. + FailureAction string `json:",omitempty"` + + // Monitor indicates how long to monitor a task for failure after it is + // created. If the task fails by ending up in one of the states + // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, + // this counts as a failure. If it fails after Monitor, it does not + // count as a failure. If Monitor is unspecified, a default value will + // be used. + Monitor time.Duration `json:",omitempty"` + + // MaxFailureRatio is the fraction of tasks that may fail during + // an update before the failure action is invoked. Any task created by + // the current update which ends up in one of the states REJECTED, + // COMPLETED or FAILED within Monitor from its creation counts as a + // failure. The number of failures is divided by the number of tasks + // being updated, and if this fraction is greater than + // MaxFailureRatio, the failure action is invoked. + // + // If the failure action is CONTINUE, there is no effect. + // If the failure action is PAUSE, no more tasks will be updated until + // another update is started. + MaxFailureRatio float32 +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go new file mode 100644 index 00000000000..0b422196960 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -0,0 +1,197 @@ +package swarm + +import "time" + +// ClusterInfo represents info about the cluster for outputing in "info" +// it contains the same information as "Swarm", but without the JoinTokens +type ClusterInfo struct { + ID string + Meta + Spec Spec +} + +// Swarm represents a swarm. +type Swarm struct { + ClusterInfo + JoinTokens JoinTokens +} + +// JoinTokens contains the tokens workers and managers need to join the swarm. +type JoinTokens struct { + // Worker is the join token workers may use to join the swarm. + Worker string + // Manager is the join token managers may use to join the swarm. + Manager string +} + +// Spec represents the spec of a swarm. 
+type Spec struct { + Annotations + + Orchestration OrchestrationConfig `json:",omitempty"` + Raft RaftConfig `json:",omitempty"` + Dispatcher DispatcherConfig `json:",omitempty"` + CAConfig CAConfig `json:",omitempty"` + TaskDefaults TaskDefaults `json:",omitempty"` + EncryptionConfig EncryptionConfig `json:",omitempty"` +} + +// OrchestrationConfig represents orchestration configuration. +type OrchestrationConfig struct { + // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or + // node. If negative, never remove completed or failed tasks. + TaskHistoryRetentionLimit *int64 `json:",omitempty"` +} + +// TaskDefaults parameterizes cluster-level task creation with default values. +type TaskDefaults struct { + // LogDriver selects the log driver to use for tasks created in the + // orchestrator if unspecified by a service. + // + // Updating this value will only have an affect on new tasks. Old tasks + // will continue use their previously configured log driver until + // recreated. + LogDriver *Driver `json:",omitempty"` +} + +// EncryptionConfig controls at-rest encryption of data and keys. +type EncryptionConfig struct { + // AutoLockManagers specifies whether or not managers TLS keys and raft data + // should be encrypted at rest in such a way that they must be unlocked + // before the manager node starts up again. + AutoLockManagers bool +} + +// RaftConfig represents raft configuration. +type RaftConfig struct { + // SnapshotInterval is the number of log entries between snapshots. + SnapshotInterval uint64 `json:",omitempty"` + + // KeepOldSnapshots is the number of snapshots to keep beyond the + // current snapshot. + KeepOldSnapshots *uint64 `json:",omitempty"` + + // LogEntriesForSlowFollowers is the number of log entries to keep + // around to sync up slow followers after a snapshot is created. + LogEntriesForSlowFollowers uint64 `json:",omitempty"` + + // ElectionTick is the number of ticks that a follower will wait for a message + // from the leader before becoming a candidate and starting an election. + // ElectionTick must be greater than HeartbeatTick. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + ElectionTick int + + // HeartbeatTick is the number of ticks between heartbeats. Every + // HeartbeatTick ticks, the leader will send a heartbeat to the + // followers. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + HeartbeatTick int +} + +// DispatcherConfig represents dispatcher configuration. +type DispatcherConfig struct { + // HeartbeatPeriod defines how often agent should send heartbeats to + // dispatcher. + HeartbeatPeriod time.Duration `json:",omitempty"` +} + +// CAConfig represents CA configuration. +type CAConfig struct { + // NodeCertExpiry is the duration certificates should be issued for + NodeCertExpiry time.Duration `json:",omitempty"` + + // ExternalCAs is a list of CAs to which a manager node will make + // certificate signing requests for node certificates. + ExternalCAs []*ExternalCA `json:",omitempty"` +} + +// ExternalCAProtocol represents type of external CA. +type ExternalCAProtocol string + +// ExternalCAProtocolCFSSL CFSSL +const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" + +// ExternalCA defines external CA to be used by the cluster. +type ExternalCA struct { + // Protocol is the protocol used by this external CA. 
+ Protocol ExternalCAProtocol + + // URL is the URL where the external CA can be reached. + URL string + + // Options is a set of additional key/value pairs whose interpretation + // depends on the specified CA type. + Options map[string]string `json:",omitempty"` +} + +// InitRequest is the request used to init a swarm. +type InitRequest struct { + ListenAddr string + AdvertiseAddr string + ForceNewCluster bool + Spec Spec + AutoLockManagers bool +} + +// JoinRequest is the request used to join a swarm. +type JoinRequest struct { + ListenAddr string + AdvertiseAddr string + RemoteAddrs []string + JoinToken string // accept by secret +} + +// UnlockRequest is the request used to unlock a swarm. +type UnlockRequest struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// LocalNodeState represents the state of the local node. +type LocalNodeState string + +const ( + // LocalNodeStateInactive INACTIVE + LocalNodeStateInactive LocalNodeState = "inactive" + // LocalNodeStatePending PENDING + LocalNodeStatePending LocalNodeState = "pending" + // LocalNodeStateActive ACTIVE + LocalNodeStateActive LocalNodeState = "active" + // LocalNodeStateError ERROR + LocalNodeStateError LocalNodeState = "error" + // LocalNodeStateLocked LOCKED + LocalNodeStateLocked LocalNodeState = "locked" +) + +// Info represents generic information about swarm. +type Info struct { + NodeID string + NodeAddr string + + LocalNodeState LocalNodeState + ControlAvailable bool + Error string + + RemoteManagers []Peer + Nodes int + Managers int + + Cluster ClusterInfo +} + +// Peer represents a peer. +type Peer struct { + NodeID string + Addr string +} + +// UpdateFlags contains flags for SwarmUpdate. +type UpdateFlags struct { + RotateWorkerToken bool + RotateManagerToken bool + RotateManagerUnlockKey bool +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go new file mode 100644 index 00000000000..ace12cc89fa --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -0,0 +1,128 @@ +package swarm + +import "time" + +// TaskState represents the state of a task. +type TaskState string + +const ( + // TaskStateNew NEW + TaskStateNew TaskState = "new" + // TaskStateAllocated ALLOCATED + TaskStateAllocated TaskState = "allocated" + // TaskStatePending PENDING + TaskStatePending TaskState = "pending" + // TaskStateAssigned ASSIGNED + TaskStateAssigned TaskState = "assigned" + // TaskStateAccepted ACCEPTED + TaskStateAccepted TaskState = "accepted" + // TaskStatePreparing PREPARING + TaskStatePreparing TaskState = "preparing" + // TaskStateReady READY + TaskStateReady TaskState = "ready" + // TaskStateStarting STARTING + TaskStateStarting TaskState = "starting" + // TaskStateRunning RUNNING + TaskStateRunning TaskState = "running" + // TaskStateComplete COMPLETE + TaskStateComplete TaskState = "complete" + // TaskStateShutdown SHUTDOWN + TaskStateShutdown TaskState = "shutdown" + // TaskStateFailed FAILED + TaskStateFailed TaskState = "failed" + // TaskStateRejected REJECTED + TaskStateRejected TaskState = "rejected" +) + +// Task represents a task. 
+type Task struct { + ID string + Meta + Annotations + + Spec TaskSpec `json:",omitempty"` + ServiceID string `json:",omitempty"` + Slot int `json:",omitempty"` + NodeID string `json:",omitempty"` + Status TaskStatus `json:",omitempty"` + DesiredState TaskState `json:",omitempty"` + NetworksAttachments []NetworkAttachment `json:",omitempty"` +} + +// TaskSpec represents the spec of a task. +type TaskSpec struct { + ContainerSpec ContainerSpec `json:",omitempty"` + Resources *ResourceRequirements `json:",omitempty"` + RestartPolicy *RestartPolicy `json:",omitempty"` + Placement *Placement `json:",omitempty"` + Networks []NetworkAttachmentConfig `json:",omitempty"` + + // LogDriver specifies the LogDriver to use for tasks created from this + // spec. If not present, the one on cluster default on swarm.Spec will be + // used, finally falling back to the engine default if not specified. + LogDriver *Driver `json:",omitempty"` + + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. + ForceUpdate uint64 +} + +// Resources represents resources (CPU/Memory). +type Resources struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` +} + +// ResourceRequirements represents resources requirements. +type ResourceRequirements struct { + Limits *Resources `json:",omitempty"` + Reservations *Resources `json:",omitempty"` +} + +// Placement represents orchestration parameters. +type Placement struct { + Constraints []string `json:",omitempty"` +} + +// RestartPolicy represents the restart policy. +type RestartPolicy struct { + Condition RestartPolicyCondition `json:",omitempty"` + Delay *time.Duration `json:",omitempty"` + MaxAttempts *uint64 `json:",omitempty"` + Window *time.Duration `json:",omitempty"` +} + +// RestartPolicyCondition represents when to restart. +type RestartPolicyCondition string + +const ( + // RestartPolicyConditionNone NONE + RestartPolicyConditionNone RestartPolicyCondition = "none" + // RestartPolicyConditionOnFailure ON_FAILURE + RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" + // RestartPolicyConditionAny ANY + RestartPolicyConditionAny RestartPolicyCondition = "any" +) + +// TaskStatus represents the status of a task. +type TaskStatus struct { + Timestamp time.Time `json:",omitempty"` + State TaskState `json:",omitempty"` + Message string `json:",omitempty"` + Err string `json:",omitempty"` + ContainerStatus ContainerStatus `json:",omitempty"` + PortStatus PortStatus `json:",omitempty"` +} + +// ContainerStatus represents the status of a container. 
+type ContainerStatus struct { + ContainerID string `json:",omitempty"` + PID int `json:",omitempty"` + ExitCode int `json:",omitempty"` +} + +// PortStatus represents the port status of a task's host ports whose +// service has published host ports +type PortStatus struct { + Ports []PortConfig `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go new file mode 100644 index 00000000000..a82c3e88ef5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -0,0 +1,549 @@ +package types + +import ( + "errors" + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/go-connections/nat" +) + +// ContainerChange contains response of Engine API: +// GET "/containers/{name:.*}/changes" +type ContainerChange struct { + Kind int + Path string +} + +// ImageHistory contains response of Engine API: +// GET "/images/{name:.*}/history" +type ImageHistory struct { + ID string `json:"Id"` + Created int64 + CreatedBy string + Tags []string + Size int64 + Comment string +} + +// ImageDelete contains response of Engine API: +// DELETE "/images/{name:.*}" +type ImageDelete struct { + Untagged string `json:",omitempty"` + Deleted string `json:",omitempty"` +} + +// GraphDriverData returns Image's graph driver config info +// when calling inspect command +type GraphDriverData struct { + Name string + Data map[string]string +} + +// RootFS returns Image's RootFS description including the layer IDs. +type RootFS struct { + Type string + Layers []string `json:",omitempty"` + BaseLayer string `json:",omitempty"` +} + +// ImageInspect contains response of Engine API: +// GET "/images/{name:.*}/json" +type ImageInspect struct { + ID string `json:"Id"` + RepoTags []string + RepoDigests []string + Parent string + Comment string + Created string + Container string + ContainerConfig *container.Config + DockerVersion string + Author string + Config *container.Config + Architecture string + Os string + OsVersion string `json:",omitempty"` + Size int64 + VirtualSize int64 + GraphDriver GraphDriverData + RootFS RootFS +} + +// Container contains response of Engine API: +// GET "/containers/json" +type Container struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } + NetworkSettings *SummaryNetworkSettings + Mounts []MountPoint +} + +// CopyConfig contains request body of Engine API: +// POST "/containers/"+containerID+"/copy" +type CopyConfig struct { + Resource string +} + +// ContainerPathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. 
+type ContainerPathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// ContainerStats contains response of Engine API: +// GET "/stats" +type ContainerStats struct { + Body io.ReadCloser `json:"body"` + OSType string `json:"ostype"` +} + +// ContainerProcessList contains response of Engine API: +// GET "/containers/{name:.*}/top" +type ContainerProcessList struct { + Processes [][]string + Titles []string +} + +// Ping contains response of Engine API: +// GET "/_ping" +type Ping struct { + APIVersion string + Experimental bool +} + +// Version contains response of Engine API: +// GET "/version" +type Version struct { + Version string + APIVersion string `json:"ApiVersion"` + MinAPIVersion string `json:"MinAPIVersion,omitempty"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// Commit records a external tool actual commit id version along the +// one expect by dockerd as set at build time +type Commit struct { + ID string + Expected string +} + +// Info contains response of Engine API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + LoggingDriver string + CgroupDriver string + NEventsListener int + KernelVersion string + OperatingSystem string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + ClusterStore string + ClusterAdvertise string + Runtimes map[string]Runtime + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shutdown or upon daemon start if + // running containers are detected + LiveRestoreEnabled bool + Isolation container.Isolation + InitBinary string + ContainerdCommit Commit + RuncCommit Commit + InitCommit Commit + SecurityOptions []string +} + +// KeyValue holds a key/value pair +type KeyValue struct { + Key, Value string +} + +// SecurityOpt contains the name and options of a security option +type SecurityOpt struct { + Name string + Options []KeyValue +} + +// DecodeSecurityOptions decodes a security options string slice to a type safe +// SecurityOpt +func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { + so := []SecurityOpt{} + for _, opt := range opts { + // support output from a < 1.13 docker daemon + if !strings.Contains(opt, "=") { + so = append(so, SecurityOpt{Name: opt}) + continue + } + secopt := SecurityOpt{} + split := strings.Split(opt, ",") + for _, s := range split { + kv := strings.SplitN(s, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("invalid security option 
%q", s) + } + if kv[0] == "" || kv[1] == "" { + return nil, errors.New("invalid empty security option") + } + if kv[0] == "name" { + secopt.Name = kv[1] + continue + } + secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]}) + } + so = append(so, secopt) + } + return so, nil +} + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. It is used by Info struct +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string +} + +// ExecStartCheck is a temp struct used by execStart +// Config fields is part of ExecConfig in runconfig package +type ExecStartCheck struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool +} + +// HealthcheckResult stores information about a single run of a healthcheck probe +type HealthcheckResult struct { + Start time.Time // Start is the time this check started + End time.Time // End is the time this check ended + ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe + Output string // Output from last check +} + +// Health states +const ( + NoHealthcheck = "none" // Indicates there is no healthcheck + Starting = "starting" // Starting indicates that the container is not yet ready + Healthy = "healthy" // Healthy indicates that the container is running correctly + Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem +) + +// Health stores information about the container's healthcheck results +type Health struct { + Status string // Status is one of Starting, Healthy or Unhealthy + FailingStreak int // FailingStreak is the number of consecutive failures + Log []*HealthcheckResult // Log contains the last few results (oldest first) +} + +// ContainerState stores container's running state +// it's part of ContainerJSONBase and will return by "inspect" command +type ContainerState struct { + Status string + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string + Health *Health `json:",omitempty"` +} + +// ContainerNode stores information about the node that a container +// is running on. 
It's only available in Docker Swarm +type ContainerNode struct { + ID string + IPAddress string `json:"IP"` + Addr string + Name string + Cpus int + Memory int64 + Labels map[string]string +} + +// ContainerJSONBase contains response of Engine API: +// GET "/containers/{name:.*}/json" +type ContainerJSONBase struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *ContainerState + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Node *ContainerNode `json:",omitempty"` + Name string + RestartCount int + Driver string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *container.HostConfig + GraphDriver GraphDriverData + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` +} + +// ContainerJSON is newly used struct along with MountPoint +type ContainerJSON struct { + *ContainerJSONBase + Mounts []MountPoint + Config *container.Config + NetworkSettings *NetworkSettings +} + +// NetworkSettings exposes the network settings in the api +type NetworkSettings struct { + NetworkSettingsBase + DefaultNetworkSettings + Networks map[string]*network.EndpointSettings +} + +// SummaryNetworkSettings provides a summary of container's networks +// in /containers/json +type SummaryNetworkSettings struct { + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsBase holds basic information about networks +type NetworkSettingsBase struct { + Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`) + SandboxID string // SandboxID uniquely represents a container's network stack + HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix + LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address + Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port + SandboxKey string // SandboxKey identifies the sandbox + SecondaryIPAddresses []network.Address + SecondaryIPv6Addresses []network.Address +} + +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. +type DefaultNetworkSettings struct { + EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox + Gateway string // Gateway holds the gateway address for the network + GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address + GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address + IPAddress string // IPAddress holds the IPv4 address for the network + IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address + IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 + MacAddress string // MacAddress holds the MAC address for the network +} + +// MountPoint represents a mount point configuration inside the container. +// This is used for reporting the mountpoints in use by a container. 
+type MountPoint struct {
+ Type mount.Type `json:",omitempty"`
+ Name string `json:",omitempty"`
+ Source string
+ Destination string
+ Driver string `json:",omitempty"`
+ Mode string
+ RW bool
+ Propagation mount.Propagation
+}
+
+// NetworkResource is the body of the "get network" http response message
+type NetworkResource struct {
+ Name string // Name is the requested name of the network
+ ID string `json:"Id"` // ID uniquely identifies a network on a single machine
+ Created time.Time // Created is the time the network was created
+ Scope string // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level)
+ Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
+ EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
+ IPAM network.IPAM // IPAM is the network's IP Address Management
+ Internal bool // Internal represents if the network is used internally only
+ Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
+ Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
+ Options map[string]string // Options holds the network-specific options to use when creating the network
+ Labels map[string]string // Labels holds metadata specific to the network being created
+ Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
+}
+
+// EndpointResource contains network resources allocated and used for a container in a network
+type EndpointResource struct {
+ Name string
+ EndpointID string
+ MacAddress string
+ IPv4Address string
+ IPv6Address string
+}
+
+// NetworkCreate is the expected body of the "create network" http request message
+type NetworkCreate struct {
+ CheckDuplicate bool
+ Driver string
+ EnableIPv6 bool
+ IPAM *network.IPAM
+ Internal bool
+ Attachable bool
+ Options map[string]string
+ Labels map[string]string
+}
+
+// NetworkCreateRequest is the request message sent to the server for network create call.
+type NetworkCreateRequest struct { + NetworkCreate + Name string +} + +// NetworkCreateResponse is the response message sent by the server for network create call +type NetworkCreateResponse struct { + ID string `json:"Id"` + Warning string +} + +// NetworkConnect represents the data to be used to connect a container to the network +type NetworkConnect struct { + Container string + EndpointConfig *network.EndpointSettings `json:",omitempty"` +} + +// NetworkDisconnect represents the data to be used to disconnect a container from the network +type NetworkDisconnect struct { + Container string + Force bool +} + +// Checkpoint represents the details of a checkpoint +type Checkpoint struct { + Name string // Name is the name of the checkpoint +} + +// Runtime describes an OCI runtime +type Runtime struct { + Path string `json:"path"` + Args []string `json:"runtimeArgs,omitempty"` +} + +// DiskUsage contains response of Engine API: +// GET "/system/df" +type DiskUsage struct { + LayersSize int64 + Images []*ImageSummary + Containers []*Container + Volumes []*Volume +} + +// ContainersPruneReport contains the response for Engine API: +// POST "/containers/prune" +type ContainersPruneReport struct { + ContainersDeleted []string + SpaceReclaimed uint64 +} + +// VolumesPruneReport contains the response for Engine API: +// POST "/volumes/prune" +type VolumesPruneReport struct { + VolumesDeleted []string + SpaceReclaimed uint64 +} + +// ImagesPruneReport contains the response for Engine API: +// POST "/images/prune" +type ImagesPruneReport struct { + ImagesDeleted []ImageDelete + SpaceReclaimed uint64 +} + +// NetworksPruneReport contains the response for Engine API: +// POST "/networks/prune" +type NetworksPruneReport struct { + NetworksDeleted []string +} + +// SecretCreateResponse contains the information returned to a client +// on the creation of a new secret. +type SecretCreateResponse struct { + // ID is the id of the created secret. + ID string +} + +// SecretListOptions holds parameters to list secrets +type SecretListOptions struct { + Filters filters.Args +} diff --git a/vendor/github.com/docker/engine-api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md similarity index 88% rename from vendor/github.com/docker/engine-api/types/versions/README.md rename to vendor/github.com/docker/docker/api/types/versions/README.md index 76c516e6a31..cdac50a53c0 100644 --- a/vendor/github.com/docker/engine-api/types/versions/README.md +++ b/vendor/github.com/docker/docker/api/types/versions/README.md @@ -9,6 +9,6 @@ Consider moving a type here when you need to keep backwards compatibility in the The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: 1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. -2. We cannot use `_` because golint complains abount it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. +2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. 
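The README hunk above documents the naming convention for version-pinned type packages, and the hunks that follow rename the `versions` comparison helpers into the `docker/docker/api/types/versions` path. As a hedged illustration only (not part of this patch), the sketch below shows how such helpers are typically used to gate behaviour on a negotiated API version; the import path follows the rename applied here, and the helper name `GreaterThanOrEqualTo` is assumed from the vendored `compare.go`.

```go
package main

import (
	"fmt"

	// Import path assumed from the rename applied by this patch.
	"github.com/docker/docker/api/types/versions"
)

func main() {
	// Example: only read the per-network stats map (expected in the
	// >=v1.21 API, see StatsJSON earlier in this patch) when the
	// negotiated API version is new enough.
	apiVersion := "1.24"
	if versions.GreaterThanOrEqualTo(apiVersion, "1.21") {
		fmt.Println("per-network stats (StatsJSON.Networks) are available")
	} else {
		fmt.Println("fall back to the pre-1.21 stats format")
	}
}
```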
diff --git a/vendor/github.com/docker/engine-api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go similarity index 100% rename from vendor/github.com/docker/engine-api/types/versions/compare.go rename to vendor/github.com/docker/docker/api/types/versions/compare.go diff --git a/vendor/github.com/docker/engine-api/types/versions/compare_test.go b/vendor/github.com/docker/docker/api/types/versions/compare_test.go similarity index 100% rename from vendor/github.com/docker/engine-api/types/versions/compare_test.go rename to vendor/github.com/docker/docker/api/types/versions/compare_test.go diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume.go new file mode 100644 index 00000000000..da4f8ebd9c9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume.go @@ -0,0 +1,58 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Volume volume +// swagger:model Volume +type Volume struct { + + // Name of the volume driver used by the volume. + // Required: true + Driver string `json:"Driver"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // Mount path of the volume on the host. + // Required: true + Mountpoint string `json:"Mountpoint"` + + // Name of the volume. + // Required: true + Name string `json:"Name"` + + // The driver specific options used when creating the volume. + // Required: true + Options map[string]string `json:"Options"` + + // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. + // Required: true + Scope string `json:"Scope"` + + // Low-level details about the volume, provided by the volume driver. + // Details are returned as a map with key/value pairs: + // `{"key":"value","key2":"value2"}`. + // + // The `Status` field is optional, and is omitted if the volume driver + // does not support this feature. + // + Status map[string]interface{} `json:"Status,omitempty"` + + // usage data + UsageData *VolumeUsageData `json:"UsageData,omitempty"` +} + +// VolumeUsageData volume usage data +// swagger:model VolumeUsageData +type VolumeUsageData struct { + + // The number of containers referencing this volume. + // Required: true + RefCount int64 `json:"RefCount"` + + // The disk space used by the volume (local driver only) + // Required: true + Size int64 `json:"Size"` +} diff --git a/vendor/github.com/docker/engine-api/.travis.yml b/vendor/github.com/docker/engine-api/.travis.yml deleted file mode 100644 index d085af29b2a..00000000000 --- a/vendor/github.com/docker/engine-api/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- - language: go - sudo: false - notifications: - email: false - go: - - 1.6 - install: make deps - script: make validate && make test diff --git a/vendor/github.com/docker/engine-api/CHANGELOG.md b/vendor/github.com/docker/engine-api/CHANGELOG.md deleted file mode 100644 index 33cefd6a5c8..00000000000 --- a/vendor/github.com/docker/engine-api/CHANGELOG.md +++ /dev/null @@ -1,141 +0,0 @@ -# Changelog - -Items starting with DEPRECATE are important deprecation notices. For more information on the list of deprecated APIs please have a look at https://docs.docker.com/misc/deprecated/ where target removal dates can also be found. - -## 0.3.2 (2016-03-30) - -## Client - -- Revert setting the ServerName in the TLS configuration at client init. 
See https://github.com/docker/swarm/issues/2027. - -## 0.3.1 (2016-03-23) - -### Client - -- Ensure that API paths are properly escaped. - -## 0.3.0 (2016-03-22) - -### Client - -- Add context to every function. -- Fix issue loading a default TLS CA. -- Allow to configure the client with a given http.Client. -- Add support for Windows named pipes. -- Set default host for Solaris. -- Add quiet flag for image load. -- Add ability to hijack connections through a proxy. -- Correctly set content type for image load. -- Add support for getting token for login. - -### Types - -- Add struct for update restart policy. -- Add human friendly State for container. -- Use OS specific host when DOCKER_HOST is not set. -- Rename Status in info to SystemStatus. -- Add internal flag to network inspect. -- Add disk quota field to container. -- Add EnableIPv6 fields. -- Add Mounts to container. -- Add cgroup driver to info. -- Add userns to host config. -- Remove email from AuthConfig. -- Make AuthConfig fields optional. -- Add IO resource settings for Windows. -- Add storage driver to host config. -- Update NetworkName to return proper user defined network names. -- Support joining cgroups by container id. -- Add KernelMemory to info. -- Add UsernsMode to container config. -- Add CPU resource control for Windows. -- Add AutoRemove to host config. -- Add Status field to Volume. -- Add Label to Image, Network and Volume. -- Add RootFS to container. - -## 0.2.3 (2016-02-02) - -### Types - -- Add missing status field. - -## 0.2.2 (2016-01-13) - -### Client - -- Fix issue configuring response hijacking with TLS enabled. - - -## 0.2.1 (2016-01-12) - -### Client - -- Fix issue detecting missing images on container creation. - -### Types - -- Remove invalid json tag in endpoint configuration. -- Add missing fields in info structure. - -## 0.2.0 (2016-01-11) - -### Client - -- Allow to force network disconnection. (docker 1.10) - -### Types - -- Add global and local alias configuration to network endpoint. -- Add network ID to network endpoint. -- Add IPAM options. -- Add Seccomp options. -- Fix issue referencing OOMKillDisable. - - -## 0.1.3 (2016-01-07) - -### Client - -- Fix issue sending all network configurations for a per network request. - - -## 0.1.2 (2016-01-07) - -### Client - -- Add interface to represent the API client. -- Restrict the fields send to the update endpoint to only those that are used. -- Send network settings as part of the container create request. (docker 1.10) -- Send network settings as part of the network connect request. (docker 1.10) - -### Types - -- Add PidsLimit as part of the host configuration. -- Add PidsStats to show PID stats. -- Add graph storage options to host configuration. -- Add NetworkConfig and EndpointIPAMConfig structs. (docker 1.10) - - -## 0.1.1 (2016-01-06) - -### Client - -- Delegate shmSize units conversion to the consumer. - -### Types - -- Add warnings to the volume list reponse. -- Fix image build options: - * use 0 as default value for shmSize. - - -## 0.1.0 (2016-01-04) - -### Client - -- Initial API client implementation. - -### Types - -- Initial API types implementation. diff --git a/vendor/github.com/docker/engine-api/MAINTAINERS b/vendor/github.com/docker/engine-api/MAINTAINERS deleted file mode 100644 index f86ffe0b7ea..00000000000 --- a/vendor/github.com/docker/engine-api/MAINTAINERS +++ /dev/null @@ -1,160 +0,0 @@ -# engine-api maintainers file -# -# This file describes who runs the docker/engine-api project and how. 
-# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "aaronlehmann", - "calavera", - "coolljt0725", - "cpuguy83", - "crosbymichael", - "dnephin", - "dongluochen", - "duglin", - "estesp", - "icecrime", - "jhowardmsft", - "lk4d4", - "mavenugo", - "mhbauer", - "runcom", - "stevvooe", - "thajeztah", - "tianon", - "tibor", - "tonistiigi", - "unclejack", - "vdemeester", - "vieux" - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. - - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.aaronlehmann] - Name = "Aaron Lehmann" - Email = "aaron.lehmann@docker.com" - GitHub = "aaronlehmann" - - [people.calavera] - Name = "David Calavera" - Email = "david.calavera@gmail.com" - GitHub = "calavera" - - [people.coolljt0725] - Name = "Lei Jitang" - Email = "leijitang@huawei.com" - GitHub = "coolljt0725" - - [people.cpuguy83] - Name = "Brian Goff" - Email = "cpuguy83@gmail.com" - Github = "cpuguy83" - - [people.crosbymichael] - Name = "Michael Crosby" - Email = "crosbymichael@gmail.com" - GitHub = "crosbymichael" - - [people.dnephin] - Name = "Daniel Nephin" - Email = "dnephin@gmail.com" - GitHub = "dnephin" - - [people.dongluochen] - Name = "Dongluo Chen" - Email = "dongluo.chen@docker.com" - GitHub = "dongluochen" - - [people.duglin] - Name = "Doug Davis" - Email = "dug@us.ibm.com" - GitHub = "duglin" - - [people.estesp] - Name = "Phil Estes" - Email = "estesp@linux.vnet.ibm.com" - GitHub = "estesp" - - [people.icecrime] - Name = "Arnaud Porterie" - Email = "arnaud@docker.com" - GitHub = "icecrime" - - [people.jhowardmsft] - Name = "John Howard" - Email = "jhoward@microsoft.com" - GitHub = "jhowardmsft" - - [people.lk4d4] - Name = "Alexander Morozov" - Email = "lk4d4@docker.com" - GitHub = "lk4d4" - - [people.mavenugo] - Name = "Madhu Venugopal" - Email = "madhu@docker.com" - GitHub = "mavenugo" - - [people.mhbauer] - Name = "Morgan Bauer" - Email = "mbauer@us.ibm.com" - GitHub = "mhbauer" - - [people.runcom] - Name = "Antonio Murdaca" - Email = "runcom@redhat.com" - GitHub = "runcom" - - [people.stevvooe] - Name = "Stephen Day" - Email = "stephen.day@docker.com" - GitHub = "stevvooe" - - [people.thajeztah] - Name = "Sebastiaan van Stijn" - Email = "github@gone.nl" - GitHub = "thaJeztah" - - [people.tianon] - Name = "Tianon Gravi" - Email = "admwiggin@gmail.com" - GitHub = "tianon" - - [people.tibor] - Name = "Tibor Vass" - Email = "tibor@docker.com" - GitHub = "tiborvass" - - [people.tonistiigi] - Name = "Tõnis Tiigi" - Email = "tonis@docker.com" - GitHub = "tonistiigi" - - [people.unclejack] - Name = "Cristian Staretu" - Email = "cristian.staretu@gmail.com" - GitHub = "unclejack" - - [people.vdemeester] - Name = "Vincent Demeester" - Email = "vincent@sbr.pm" - GitHub = "vdemeester" - - [people.vieux] - Name = "Victor Vieux" - Email = "vieux@docker.com" - GitHub = "vieux" diff --git a/vendor/github.com/docker/engine-api/Makefile b/vendor/github.com/docker/engine-api/Makefile deleted file mode 100644 index 51b0ca317a6..00000000000 --- a/vendor/github.com/docker/engine-api/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -.PHONY: all deps test validate lint - -all: deps test validate - 
-deps: - go get -t ./... - go get github.com/golang/lint/golint - -test: - go test -race -cover ./... - -validate: lint - go vet ./... - test -z "$(gofmt -s -l . | tee /dev/stderr)" - -lint: - out="$$(golint ./...)"; \ - if [ -n "$$(golint ./...)" ]; then \ - echo "$$out"; \ - exit 1; \ - fi diff --git a/vendor/github.com/docker/engine-api/README.md b/vendor/github.com/docker/engine-api/README.md deleted file mode 100644 index 897dbdf9e82..00000000000 --- a/vendor/github.com/docker/engine-api/README.md +++ /dev/null @@ -1,68 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/docker/engine-api?status.svg)](https://godoc.org/github.com/docker/engine-api) - -# Introduction - -Engine-api is a set of Go libraries to implement client and server components compatible with the Docker engine. -The code was extracted from the [Docker engine](https://github.com/docker/docker) and contributed back as an external library. - -## Components - -### Client - -The client package implements a fully featured http client to interact with the Docker engine. It's modeled after the requirements of the Docker engine CLI, but it can also serve other purposes. - -#### Usage - -You can use this client package in your applications by creating a new client object. Then use that object to execute operations against the remote server. Follow the example below to see how to list all the containers running in a Docker engine host: - -```go -package main - -import ( - "fmt" - - "github.com/docker/engine-api/client" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func main() { - defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0"} - cli, err := client.NewClient("unix:///var/run/docker.sock", "v1.22", nil, defaultHeaders) - if err != nil { - panic(err) - } - - options := types.ContainerListOptions{All: true} - containers, err := cli.ContainerList(context.Background(), options) - if err != nil { - panic(err) - } - - for _, c := range containers { - fmt.Println(c.ID) - } -} -``` - -### Types - -The types package includes all typed structures that client and server serialize to execute operations. - -### Server - -The server package includes API endpoints that applications compatible with the Docker engine API can reuse. It also provides useful middlewares and helpers to handle http requests. - -This package is still pending to be extracted from the Docker engine. - -## Developing - -engine-api requires some minimal libraries that you can download running `make deps`. - -To run tests, use the command `make test`. We use build tags to isolate functions and structures that are only available for testing. - -To validate the sources, use the command `make validate`. - -## License - -engine-api is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text. diff --git a/vendor/github.com/docker/engine-api/appveyor.yml b/vendor/github.com/docker/engine-api/appveyor.yml deleted file mode 100644 index b31137f3e1c..00000000000 --- a/vendor/github.com/docker/engine-api/appveyor.yml +++ /dev/null @@ -1,37 +0,0 @@ -version: "{build}" - -# Source Config -clone_folder: c:\gopath\src\github.com\docker\engine-api - -# Build host - -environment: - GOPATH: c:\gopath - GOVERSION: 1.6 - -init: - - git config --global core.autocrlf input - -# Build - -install: - # Install Go 1.6. 
- - rmdir c:\go /s /q - - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi - - msiexec /i go%GOVERSION%.windows-amd64.msi /q - - set Path=c:\go\bin;c:\gopath\bin;%Path% - - go version - - go env - -build: false -deploy: false - -before_test: - - go get -t ./... - - go get github.com/golang/lint/golint - -test_script: - - go vet ./... - - golint ./... - - gofmt -s -l . - - go test -race -cover -v -tags=test ./... diff --git a/vendor/github.com/docker/engine-api/client/client.go b/vendor/github.com/docker/engine-api/client/client.go deleted file mode 100644 index 8c8c6fd1828..00000000000 --- a/vendor/github.com/docker/engine-api/client/client.go +++ /dev/null @@ -1,141 +0,0 @@ -package client - -import ( - "fmt" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - - "github.com/docker/engine-api/client/transport" - "github.com/docker/go-connections/tlsconfig" -) - -// Client is the API client that performs all operations -// against a docker server. -type Client struct { - // proto holds the client protocol i.e. unix. - proto string - // addr holds the client address. - addr string - // basePath holds the path to prepend to the requests. - basePath string - // transport is the interface to send request with, it implements transport.Client. - transport transport.Client - // version of the server to talk to. - version string - // custom http headers configured by users. - customHTTPHeaders map[string]string -} - -// NewEnvClient initializes a new API client based on environment variables. -// Use DOCKER_HOST to set the url to the docker server. -// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. -// Use DOCKER_CERT_PATH to load the tls certificates from. -// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. -func NewEnvClient() (*Client, error) { - var client *http.Client - if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { - options := tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, "ca.pem"), - CertFile: filepath.Join(dockerCertPath, "cert.pem"), - KeyFile: filepath.Join(dockerCertPath, "key.pem"), - InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", - } - tlsc, err := tlsconfig.Client(options) - if err != nil { - return nil, err - } - - client = &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsc, - }, - } - } - - host := os.Getenv("DOCKER_HOST") - if host == "" { - host = DefaultDockerHost - } - return NewClient(host, os.Getenv("DOCKER_API_VERSION"), client, nil) -} - -// NewClient initializes a new API client for the given host and API version. -// It won't send any version information if the version number is empty. -// It uses the given http client as transport. -// It also initializes the custom http headers to add to each request. -func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { - proto, addr, basePath, err := ParseHost(host) - if err != nil { - return nil, err - } - - transport, err := transport.NewTransportWithHTTP(proto, addr, client) - if err != nil { - return nil, err - } - - return &Client{ - proto: proto, - addr: addr, - basePath: basePath, - transport: transport, - version: version, - customHTTPHeaders: httpHeaders, - }, nil -} - -// getAPIPath returns the versioned request path to call the api. -// It appends the query parameters to the path if they are not empty. 
-func (cli *Client) getAPIPath(p string, query url.Values) string { - var apiPath string - if cli.version != "" { - v := strings.TrimPrefix(cli.version, "v") - apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p) - } else { - apiPath = fmt.Sprintf("%s%s", cli.basePath, p) - } - - u := &url.URL{ - Path: apiPath, - } - if len(query) > 0 { - u.RawQuery = query.Encode() - } - return u.String() -} - -// ClientVersion returns the version string associated with this -// instance of the Client. Note that this value can be changed -// via the DOCKER_API_VERSION env var. -func (cli *Client) ClientVersion() string { - return cli.version -} - -// UpdateClientVersion updates the version string associated with this -// instance of the Client. -func (cli *Client) UpdateClientVersion(v string) { - cli.version = v -} - -// ParseHost verifies that the given host strings is valid. -func ParseHost(host string) (string, string, string, error) { - protoAddrParts := strings.SplitN(host, "://", 2) - if len(protoAddrParts) == 1 { - return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host) - } - - var basePath string - proto, addr := protoAddrParts[0], protoAddrParts[1] - if proto == "tcp" { - parsed, err := url.Parse("tcp://" + addr) - if err != nil { - return "", "", "", err - } - addr = parsed.Host - basePath = parsed.Path - } - return proto, addr, basePath, nil -} diff --git a/vendor/github.com/docker/engine-api/client/client_darwin.go b/vendor/github.com/docker/engine-api/client/client_darwin.go deleted file mode 100644 index 4b47a178c48..00000000000 --- a/vendor/github.com/docker/engine-api/client/client_darwin.go +++ /dev/null @@ -1,4 +0,0 @@ -package client - -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset -const DefaultDockerHost = "tcp://127.0.0.1:2375" diff --git a/vendor/github.com/docker/engine-api/client/client_mock_test.go b/vendor/github.com/docker/engine-api/client/client_mock_test.go deleted file mode 100644 index f13c0a3f99b..00000000000 --- a/vendor/github.com/docker/engine-api/client/client_mock_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package client - -import ( - "bytes" - "crypto/tls" - "io/ioutil" - "net/http" - - "github.com/docker/engine-api/client/transport" -) - -type mockClient struct { - do func(*http.Request) (*http.Response, error) -} - -// TLSConfig returns the TLS configuration. -func (m *mockClient) TLSConfig() *tls.Config { - return &tls.Config{} -} - -// Scheme returns protocol scheme to use. -func (m *mockClient) Scheme() string { - return "http" -} - -// Secure returns true if there is a TLS configuration. -func (m *mockClient) Secure() bool { - return false -} - -// NewMockClient returns a mocked client that runs the function supplied as `client.Do` call -func newMockClient(tlsConfig *tls.Config, doer func(*http.Request) (*http.Response, error)) transport.Client { - if tlsConfig != nil { - panic("this actually gets set!") - } - - return &mockClient{ - do: doer, - } -} - -// Do executes the supplied function for the mock. 
-func (m mockClient) Do(req *http.Request) (*http.Response, error) { - return m.do(req) -} - -func errorMock(statusCode int, message string) func(req *http.Request) (*http.Response, error) { - return func(req *http.Request) (*http.Response, error) { - return &http.Response{ - StatusCode: statusCode, - Body: ioutil.NopCloser(bytes.NewReader([]byte(message))), - }, nil - } -} diff --git a/vendor/github.com/docker/engine-api/client/client_test.go b/vendor/github.com/docker/engine-api/client/client_test.go deleted file mode 100644 index 4569854482d..00000000000 --- a/vendor/github.com/docker/engine-api/client/client_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - "strings" - "testing" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func TestGetAPIPath(t *testing.T) { - cases := []struct { - v string - p string - q url.Values - e string - }{ - {"", "/containers/json", nil, "/containers/json"}, - {"", "/containers/json", url.Values{}, "/containers/json"}, - {"", "/containers/json", url.Values{"s": []string{"c"}}, "/containers/json?s=c"}, - {"1.22", "/containers/json", nil, "/v1.22/containers/json"}, - {"1.22", "/containers/json", url.Values{}, "/v1.22/containers/json"}, - {"1.22", "/containers/json", url.Values{"s": []string{"c"}}, "/v1.22/containers/json?s=c"}, - {"v1.22", "/containers/json", nil, "/v1.22/containers/json"}, - {"v1.22", "/containers/json", url.Values{}, "/v1.22/containers/json"}, - {"v1.22", "/containers/json", url.Values{"s": []string{"c"}}, "/v1.22/containers/json?s=c"}, - {"v1.22", "/networks/kiwl$%^", nil, "/v1.22/networks/kiwl$%25%5E"}, - } - - for _, cs := range cases { - c, err := NewClient("unix:///var/run/docker.sock", cs.v, nil, nil) - if err != nil { - t.Fatal(err) - } - g := c.getAPIPath(cs.p, cs.q) - if g != cs.e { - t.Fatalf("Expected %s, got %s", cs.e, g) - } - } -} - -func TestParseHost(t *testing.T) { - cases := []struct { - host string - proto string - addr string - base string - err bool - }{ - {"", "", "", "", true}, - {"foobar", "", "", "", true}, - {"foo://bar", "foo", "bar", "", false}, - {"tcp://localhost:2476", "tcp", "localhost:2476", "", false}, - {"tcp://localhost:2476/path", "tcp", "localhost:2476", "/path", false}, - } - - for _, cs := range cases { - p, a, b, e := ParseHost(cs.host) - if cs.err && e == nil { - t.Fatalf("expected error, got nil") - } - if !cs.err && e != nil { - t.Fatal(e) - } - if cs.proto != p { - t.Fatalf("expected proto %s, got %s", cs.proto, p) - } - if cs.addr != a { - t.Fatalf("expected addr %s, got %s", cs.addr, a) - } - if cs.base != b { - t.Fatalf("expected base %s, got %s", cs.base, b) - } - } -} - -func TestUpdateClientVersion(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - splitQuery := strings.Split(req.URL.Path, "/") - queryVersion := splitQuery[1] - b, err := json.Marshal(types.Version{ - APIVersion: queryVersion, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - cases := []struct { - v string - }{ - {"1.20"}, - {"v1.21"}, - {"1.22"}, - {"v1.22"}, - } - - for _, cs := range cases { - client.UpdateClientVersion(cs.v) - r, err := client.ServerVersion(context.Background()) - if err != nil { - t.Fatal(err) - } - if strings.TrimPrefix(r.APIVersion, "v") != strings.TrimPrefix(cs.v, "v") { - t.Fatalf("Expected %s, got 
%s", cs.v, r.APIVersion) - } - } -} diff --git a/vendor/github.com/docker/engine-api/client/client_unix.go b/vendor/github.com/docker/engine-api/client/client_unix.go deleted file mode 100644 index 572c5f87a78..00000000000 --- a/vendor/github.com/docker/engine-api/client/client_unix.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build linux freebsd solaris openbsd - -package client - -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset -const DefaultDockerHost = "unix:///var/run/docker.sock" diff --git a/vendor/github.com/docker/engine-api/client/client_windows.go b/vendor/github.com/docker/engine-api/client/client_windows.go deleted file mode 100644 index 07c0c7a7749..00000000000 --- a/vendor/github.com/docker/engine-api/client/client_windows.go +++ /dev/null @@ -1,4 +0,0 @@ -package client - -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset -const DefaultDockerHost = "npipe:////./pipe/docker_engine" diff --git a/vendor/github.com/docker/engine-api/client/container_attach.go b/vendor/github.com/docker/engine-api/client/container_attach.go deleted file mode 100644 index 1b616bf0385..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_attach.go +++ /dev/null @@ -1,34 +0,0 @@ -package client - -import ( - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerAttach attaches a connection to a container in the server. -// It returns a types.HijackedConnection with the hijacked connection -// and the a reader to get output. It's up to the called to close -// the hijacked connection by calling types.HijackedResponse.Close. -func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { - query := url.Values{} - if options.Stream { - query.Set("stream", "1") - } - if options.Stdin { - query.Set("stdin", "1") - } - if options.Stdout { - query.Set("stdout", "1") - } - if options.Stderr { - query.Set("stderr", "1") - } - if options.DetachKeys != "" { - query.Set("detachKeys", options.DetachKeys) - } - - headers := map[string][]string{"Content-Type": {"text/plain"}} - return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) -} diff --git a/vendor/github.com/docker/engine-api/client/container_commit.go b/vendor/github.com/docker/engine-api/client/container_commit.go deleted file mode 100644 index d5c47499066..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_commit.go +++ /dev/null @@ -1,53 +0,0 @@ -package client - -import ( - "encoding/json" - "errors" - "net/url" - - distreference "github.com/docker/distribution/reference" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/reference" - "golang.org/x/net/context" -) - -// ContainerCommit applies changes into a container and creates a new tagged image. 
-func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) { - var repository, tag string - if options.Reference != "" { - distributionRef, err := distreference.ParseNamed(options.Reference) - if err != nil { - return types.ContainerCommitResponse{}, err - } - - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { - return types.ContainerCommitResponse{}, errors.New("refusing to create a tag with a digest reference") - } - - tag = reference.GetTagFromNamedRef(distributionRef) - repository = distributionRef.Name() - } - - query := url.Values{} - query.Set("container", container) - query.Set("repo", repository) - query.Set("tag", tag) - query.Set("comment", options.Comment) - query.Set("author", options.Author) - for _, change := range options.Changes { - query.Add("changes", change) - } - if options.Pause != true { - query.Set("pause", "0") - } - - var response types.ContainerCommitResponse - resp, err := cli.post(ctx, "/commit", query, options.Config, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/engine-api/client/container_commit_test.go b/vendor/github.com/docker/engine-api/client/container_commit_test.go deleted file mode 100644 index c2b1b0bb48e..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_commit_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "testing" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func TestContainerCommitError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerCommit(context.Background(), "nothing", types.ContainerCommitOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerCommit(t *testing.T) { - expectedContainerID := "container_id" - specifiedReference := "repository_name:tag" - expectedRepositoryName := "repository_name" - expectedTag := "tag" - expectedComment := "comment" - expectedAuthor := "author" - expectedChanges := []string{"change1", "change2"} - - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - query := req.URL.Query() - containerID := query.Get("container") - if containerID != expectedContainerID { - return nil, fmt.Errorf("container id not set in URL query properly. Expected '%s', got %s", expectedContainerID, containerID) - } - repo := query.Get("repo") - if repo != expectedRepositoryName { - return nil, fmt.Errorf("container repo not set in URL query properly. Expected '%s', got %s", expectedRepositoryName, repo) - } - tag := query.Get("tag") - if tag != expectedTag { - return nil, fmt.Errorf("container tag not set in URL query properly. Expected '%s', got %s'", expectedTag, tag) - } - comment := query.Get("comment") - if comment != expectedComment { - return nil, fmt.Errorf("container comment not set in URL query properly. Expected '%s', got %s'", expectedComment, comment) - } - author := query.Get("author") - if author != expectedAuthor { - return nil, fmt.Errorf("container author not set in URL query properly. 
Expected '%s', got %s'", expectedAuthor, author) - } - pause := query.Get("pause") - if pause != "0" { - return nil, fmt.Errorf("container pause not set in URL query properly. Expected 'true', got %v'", pause) - } - changes := query["changes"] - if len(changes) != len(expectedChanges) { - return nil, fmt.Errorf("expected container changes size to be '%d', got %d", len(expectedChanges), len(changes)) - } - b, err := json.Marshal(types.ContainerCommitResponse{ - ID: "new_container_id", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - r, err := client.ContainerCommit(context.Background(), expectedContainerID, types.ContainerCommitOptions{ - Reference: specifiedReference, - Comment: expectedComment, - Author: expectedAuthor, - Changes: expectedChanges, - Pause: false, - }) - if err != nil { - t.Fatal(err) - } - if r.ID != "new_container_id" { - t.Fatalf("expected `container_id`, got %s", r.ID) - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_copy.go b/vendor/github.com/docker/engine-api/client/container_copy.go deleted file mode 100644 index d3dd0b116c0..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_copy.go +++ /dev/null @@ -1,97 +0,0 @@ -package client - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "path/filepath" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" -) - -// ContainerStatPath returns Stat information about a path inside the container filesystem. -func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { - query := url.Values{} - query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. - - urlStr := fmt.Sprintf("/containers/%s/archive", containerID) - response, err := cli.head(ctx, urlStr, query, nil) - if err != nil { - return types.ContainerPathStat{}, err - } - defer ensureReaderClosed(response) - return getContainerPathStatFromHeader(response.header) -} - -// CopyToContainer copies content into the container filesystem. -func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error { - query := url.Values{} - query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. - // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. - if !options.AllowOverwriteDirWithFile { - query.Set("noOverwriteDirNonDir", "true") - } - - apiPath := fmt.Sprintf("/containers/%s/archive", container) - - response, err := cli.putRaw(ctx, apiPath, query, content, nil) - if err != nil { - return err - } - defer ensureReaderClosed(response) - - if response.statusCode != http.StatusOK { - return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) - } - - return nil -} - -// CopyFromContainer gets the content from the container and returns it as a Reader -// to manipulate it in the host. It's up to the caller to close the reader. -func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { - query := make(url.Values, 1) - query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. 
- - apiPath := fmt.Sprintf("/containers/%s/archive", container) - response, err := cli.get(ctx, apiPath, query, nil) - if err != nil { - return nil, types.ContainerPathStat{}, err - } - - if response.statusCode != http.StatusOK { - return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) - } - - // In order to get the copy behavior right, we need to know information - // about both the source and the destination. The response headers include - // stat info about the source that we can use in deciding exactly how to - // copy it locally. Along with the stat info about the local destination, - // we have everything we need to handle the multiple possibilities there - // can be when copying a file/dir from one location to another file/dir. - stat, err := getContainerPathStatFromHeader(response.header) - if err != nil { - return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) - } - return response.body, stat, err -} - -func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { - var stat types.ContainerPathStat - - encodedStat := header.Get("X-Docker-Container-Path-Stat") - statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) - - err := json.NewDecoder(statDecoder).Decode(&stat) - if err != nil { - err = fmt.Errorf("unable to decode container path stat header: %s", err) - } - - return stat, err -} diff --git a/vendor/github.com/docker/engine-api/client/container_copy_test.go b/vendor/github.com/docker/engine-api/client/container_copy_test.go deleted file mode 100644 index c73c09cf070..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_copy_test.go +++ /dev/null @@ -1,244 +0,0 @@ -package client - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" -) - -func TestContainerStatPathError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerStatPath(context.Background(), "container_id", "path") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server error, got %v", err) - } -} - -func TestContainerStatPathNoHeaderError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - _, err := client.ContainerStatPath(context.Background(), "container_id", "path/to/file") - if err == nil { - t.Fatalf("expected an error, got nothing") - } -} - -func TestContainerStatPath(t *testing.T) { - expectedURL := "/containers/container_id/archive" - expectedPath := "path/to/file" - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "HEAD" { - return nil, fmt.Errorf("expected HEAD method, got %s", req.Method) - } - query := req.URL.Query() - path := query.Get("path") - if path != expectedPath { - return nil, fmt.Errorf("path not set in URL query properly") - } - content, err := json.Marshal(types.ContainerPathStat{ - Name: "name", - Mode: 0700, - }) - if err != nil { - 
return nil, err - } - base64PathStat := base64.StdEncoding.EncodeToString(content) - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - Header: http.Header{ - "X-Docker-Container-Path-Stat": []string{base64PathStat}, - }, - }, nil - }), - } - stat, err := client.ContainerStatPath(context.Background(), "container_id", expectedPath) - if err != nil { - t.Fatal(err) - } - if stat.Name != "name" { - t.Fatalf("expected container path stat name to be 'name', was '%s'", stat.Name) - } - if stat.Mode != 0700 { - t.Fatalf("expected container path stat mode to be 0700, was '%v'", stat.Mode) - } -} - -func TestCopyToContainerError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server error, got %v", err) - } -} - -func TestCopyToContainerNotStatusOKError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusNoContent, "No content")), - } - err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) - if err == nil || err.Error() != "unexpected status code from daemon: 204" { - t.Fatalf("expected a unexpected status code error, got %v", err) - } -} - -func TestCopyToContainer(t *testing.T) { - expectedURL := "/containers/container_id/archive" - expectedPath := "path/to/file" - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "PUT" { - return nil, fmt.Errorf("expected PUT method, got %s", req.Method) - } - query := req.URL.Query() - path := query.Get("path") - if path != expectedPath { - return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path) - } - noOverwriteDirNonDir := query.Get("noOverwriteDirNonDir") - if noOverwriteDirNonDir != "true" { - return nil, fmt.Errorf("noOverwriteDirNonDir not set in URL query properly, expected true, got %s", noOverwriteDirNonDir) - } - - content, err := ioutil.ReadAll(req.Body) - if err != nil { - return nil, err - } - if err := req.Body.Close(); err != nil { - return nil, err - } - if string(content) != "content" { - return nil, fmt.Errorf("expected content to be 'content', got %s", string(content)) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - err := client.CopyToContainer(context.Background(), "container_id", expectedPath, bytes.NewReader([]byte("content")), types.CopyToContainerOptions{ - AllowOverwriteDirWithFile: false, - }) - if err != nil { - t.Fatal(err) - } -} - -func TestCopyFromContainerError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server error, got %v", err) - } -} - -func TestCopyFromContainerNotStatusOKError(t *testing.T) { - 
client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusNoContent, "No content")), - } - _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") - if err == nil || err.Error() != "unexpected status code from daemon: 204" { - t.Fatalf("expected a unexpected status code error, got %v", err) - } -} - -func TestCopyFromContainerNoHeaderError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") - if err == nil { - t.Fatalf("expected an error, got nothing") - } -} - -func TestCopyFromContainer(t *testing.T) { - expectedURL := "/containers/container_id/archive" - expectedPath := "path/to/file" - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "GET" { - return nil, fmt.Errorf("expected PUT method, got %s", req.Method) - } - query := req.URL.Query() - path := query.Get("path") - if path != expectedPath { - return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path) - } - - headercontent, err := json.Marshal(types.ContainerPathStat{ - Name: "name", - Mode: 0700, - }) - if err != nil { - return nil, err - } - base64PathStat := base64.StdEncoding.EncodeToString(headercontent) - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("content"))), - Header: http.Header{ - "X-Docker-Container-Path-Stat": []string{base64PathStat}, - }, - }, nil - }), - } - r, stat, err := client.CopyFromContainer(context.Background(), "container_id", expectedPath) - if err != nil { - t.Fatal(err) - } - if stat.Name != "name" { - t.Fatalf("expected container path stat name to be 'name', was '%s'", stat.Name) - } - if stat.Mode != 0700 { - t.Fatalf("expected container path stat mode to be 0700, was '%v'", stat.Mode) - } - content, err := ioutil.ReadAll(r) - if err != nil { - t.Fatal(err) - } - if err := r.Close(); err != nil { - t.Fatal(err) - } - if string(content) != "content" { - t.Fatalf("expected content to be 'content', got %s", string(content)) - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_create.go b/vendor/github.com/docker/engine-api/client/container_create.go deleted file mode 100644 index 98935794dad..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_create.go +++ /dev/null @@ -1,46 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - "strings" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/network" - "golang.org/x/net/context" -) - -type configWrapper struct { - *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig -} - -// ContainerCreate creates a new container based in the given configuration. -// It can be associated with a name, but it's not mandatory. 
-func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) { - var response types.ContainerCreateResponse - query := url.Values{} - if containerName != "" { - query.Set("name", containerName) - } - - body := configWrapper{ - Config: config, - HostConfig: hostConfig, - NetworkingConfig: networkingConfig, - } - - serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) - if err != nil { - if serverResp != nil && serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { - return response, imageNotFoundError{config.Image} - } - return response, err - } - - err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} diff --git a/vendor/github.com/docker/engine-api/client/container_create_test.go b/vendor/github.com/docker/engine-api/client/container_create_test.go deleted file mode 100644 index 04ea9d9e859..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_create_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "testing" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "golang.org/x/net/context" -) - -func TestContainerCreateError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerCreate(context.Background(), nil, nil, nil, "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } - - // 404 doesn't automagitally means an unknown image - client = &Client{ - transport: newMockClient(nil, errorMock(http.StatusNotFound, "Server error")), - } - _, err = client.ContainerCreate(context.Background(), nil, nil, nil, "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerCreateImageNotFound(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusNotFound, "No such image")), - } - _, err := client.ContainerCreate(context.Background(), &container.Config{Image: "unknown_image"}, nil, nil, "unknown") - if err == nil || !IsErrImageNotFound(err) { - t.Fatalf("expected a imageNotFound error, got %v", err) - } -} - -func TestContainerCreateWithName(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - name := req.URL.Query().Get("name") - if name != "container_name" { - return nil, fmt.Errorf("container name not set in URL query properly. 
Expected `container_name`, got %s", name) - } - b, err := json.Marshal(types.ContainerCreateResponse{ - ID: "container_id", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - r, err := client.ContainerCreate(context.Background(), nil, nil, nil, "container_name") - if err != nil { - t.Fatal(err) - } - if r.ID != "container_id" { - t.Fatalf("expected `container_id`, got %s", r.ID) - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_diff.go b/vendor/github.com/docker/engine-api/client/container_diff.go deleted file mode 100644 index f4bb3a46b99..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_diff.go +++ /dev/null @@ -1,23 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerDiff shows differences in a container filesystem since it was started. -func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]types.ContainerChange, error) { - var changes []types.ContainerChange - - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) - if err != nil { - return changes, err - } - - err = json.NewDecoder(serverResp.body).Decode(&changes) - ensureReaderClosed(serverResp) - return changes, err -} diff --git a/vendor/github.com/docker/engine-api/client/container_diff_test.go b/vendor/github.com/docker/engine-api/client/container_diff_test.go deleted file mode 100644 index 11a9d70cfb1..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_diff_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - "testing" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func TestContainerDiffError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerDiff(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } - -} - -func TestContainerDiff(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - b, err := json.Marshal([]types.ContainerChange{ - { - Kind: 0, - Path: "/path/1", - }, - { - Kind: 1, - Path: "/path/2", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - changes, err := client.ContainerDiff(context.Background(), "container_id") - if err != nil { - t.Fatal(err) - } - if len(changes) != 2 { - t.Fatalf("expected an array of 2 changes, got %v", changes) - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_exec.go b/vendor/github.com/docker/engine-api/client/container_exec.go deleted file mode 100644 index ff7e1a9d059..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_exec.go +++ /dev/null @@ -1,49 +0,0 @@ -package client - -import ( - "encoding/json" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerExecCreate creates a new exec configuration to run an exec process. 
-func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error) { - var response types.ContainerExecCreateResponse - resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) - if err != nil { - return response, err - } - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} - -// ContainerExecStart starts an exec process already created in the docker host. -func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { - resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) - ensureReaderClosed(resp) - return err -} - -// ContainerExecAttach attaches a connection to an exec process in the server. -// It returns a types.HijackedConnection with the hijacked connection -// and the a reader to get output. It's up to the called to close -// the hijacked connection by calling types.HijackedResponse.Close. -func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) { - headers := map[string][]string{"Content-Type": {"application/json"}} - return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) -} - -// ContainerExecInspect returns information about a specific exec process on the docker host. -func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { - var response types.ContainerExecInspect - resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/engine-api/client/container_exec_test.go b/vendor/github.com/docker/engine-api/client/container_exec_test.go deleted file mode 100644 index 9518b603e96..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_exec_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" -) - -func TestContainerExecCreateError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerExecCreate(t *testing.T) { - expectedURL := "/containers/container_id/exec" - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - // FIXME validate the content is the given ExecConfig ? 
- if err := req.ParseForm(); err != nil { - return nil, err - } - execConfig := &types.ExecConfig{} - if err := json.NewDecoder(req.Body).Decode(execConfig); err != nil { - return nil, err - } - if execConfig.User != "user" { - return nil, fmt.Errorf("expected an execConfig with User == 'user', got %v", execConfig) - } - b, err := json.Marshal(types.ContainerExecCreateResponse{ - ID: "exec_id", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - r, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{ - User: "user", - }) - if err != nil { - t.Fatal(err) - } - if r.ID != "exec_id" { - t.Fatalf("expected `exec_id`, got %s", r.ID) - } -} - -func TestContainerExecStartError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerExecStart(context.Background(), "nothing", types.ExecStartCheck{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerExecStart(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if err := req.ParseForm(); err != nil { - return nil, err - } - execStartCheck := &types.ExecStartCheck{} - if err := json.NewDecoder(req.Body).Decode(execStartCheck); err != nil { - return nil, err - } - if execStartCheck.Tty || !execStartCheck.Detach { - return nil, fmt.Errorf("expected execStartCheck{Detach:true,Tty:false}, got %v", execStartCheck) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.ContainerExecStart(context.Background(), "exec_id", types.ExecStartCheck{ - Detach: true, - Tty: false, - }) - if err != nil { - t.Fatal(err) - } -} - -func TestContainerExecInspectError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerExecInspect(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerExecInspect(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - b, err := json.Marshal(types.ContainerExecInspect{ - ExecID: "exec_id", - ContainerID: "container_id", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - inspect, err := client.ContainerExecInspect(context.Background(), "exec_id") - if err != nil { - t.Fatal(err) - } - if inspect.ExecID != "exec_id" { - t.Fatalf("expected ExecID to be `exec_id`, got %s", inspect.ExecID) - } - if inspect.ContainerID != "container_id" { - t.Fatalf("expected ContainerID `container_id`, got %s", inspect.ContainerID) - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_export.go b/vendor/github.com/docker/engine-api/client/container_export.go deleted file mode 100644 index 52194f3d342..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_export.go +++ /dev/null @@ -1,20 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "golang.org/x/net/context" -) - -// 
ContainerExport retrieves the raw contents of a container -// and returns them as an io.ReadCloser. It's up to the caller -// to close the stream. -func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) - if err != nil { - return nil, err - } - - return serverResp.body, nil -} diff --git a/vendor/github.com/docker/engine-api/client/container_export_test.go b/vendor/github.com/docker/engine-api/client/container_export_test.go deleted file mode 100644 index 4efe0e2992f..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_export_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package client - -import ( - "net/http" - "testing" - - "golang.org/x/net/context" -) - -func TestContainerExportError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerExport(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_inspect.go b/vendor/github.com/docker/engine-api/client/container_inspect.go deleted file mode 100644 index afd71eefcb0..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_inspect.go +++ /dev/null @@ -1,65 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerInspect returns the container information. -func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) - if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ContainerJSON{}, containerNotFoundError{containerID} - } - return types.ContainerJSON{}, err - } - - var response types.ContainerJSON - err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} - -// ContainerInspectWithRaw returns the container information and it's raw representation. 
-func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { - query := url.Values{} - if getSize { - query.Set("size", "1") - } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) - if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ContainerJSON{}, nil, containerNotFoundError{containerID} - } - return types.ContainerJSON{}, nil, err - } - defer ensureReaderClosed(serverResp) - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return types.ContainerJSON{}, nil, err - } - - var response types.ContainerJSON - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} - -func (cli *Client) containerInspectWithResponse(ctx context.Context, containerID string, query url.Values) (types.ContainerJSON, *serverResponse, error) { - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) - if err != nil { - return types.ContainerJSON{}, serverResp, err - } - - var response types.ContainerJSON - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, serverResp, err -} diff --git a/vendor/github.com/docker/engine-api/client/container_inspect_test.go b/vendor/github.com/docker/engine-api/client/container_inspect_test.go deleted file mode 100644 index 9fe7f2b144a..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_inspect_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - "testing" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func TestContainerInspectError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.ContainerInspect(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerInspectContainerNotFound(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusNotFound, "Server error")), - } - - _, err := client.ContainerInspect(context.Background(), "unknown") - if err == nil || !IsErrContainerNotFound(err) { - t.Fatalf("expected a containerNotFound error, got %v", err) - } -} - -func TestContainerInspect(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - content, err := json.Marshal(types.ContainerJSON{ - ContainerJSONBase: &types.ContainerJSONBase{ - ID: "container_id", - Image: "image", - Name: "name", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - r, err := client.ContainerInspect(context.Background(), "container_id") - if err != nil { - t.Fatal(err) - } - if r.ID != "container_id" { - t.Fatalf("expected `container_id`, got %s", r.ID) - } - if r.Image != "image" { - t.Fatalf("expected `image`, got %s", r.ID) - } - if r.Name != "name" { - t.Fatalf("expected `name`, got %s", r.ID) - } -} - -func TestContainerInspectNode(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - content, err := json.Marshal(types.ContainerJSON{ - ContainerJSONBase: &types.ContainerJSONBase{ - ID: "container_id", - Image: "image", 
- Name: "name", - Node: &types.ContainerNode{ - ID: "container_node_id", - Addr: "container_node", - Labels: map[string]string{"foo": "bar"}, - }, - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - r, err := client.ContainerInspect(context.Background(), "container_id") - if err != nil { - t.Fatal(err) - } - if r.ID != "container_id" { - t.Fatalf("expected `container_id`, got %s", r.ID) - } - if r.Image != "image" { - t.Fatalf("expected `image`, got %s", r.ID) - } - if r.Name != "name" { - t.Fatalf("expected `name`, got %s", r.ID) - } - if r.Node.ID != "container_node_id" { - t.Fatalf("expected `container_node_id`, got %s", r.Node.ID) - } - if r.Node.Addr != "container_node" { - t.Fatalf("expected `container_node`, got %s", r.Node.Addr) - } - foo, ok := r.Node.Labels["foo"] - if foo != "bar" || !ok { - t.Fatalf("expected `bar` for label `foo`") - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_kill.go b/vendor/github.com/docker/engine-api/client/container_kill.go deleted file mode 100644 index 29f80c73ade..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_kill.go +++ /dev/null @@ -1,17 +0,0 @@ -package client - -import ( - "net/url" - - "golang.org/x/net/context" -) - -// ContainerKill terminates the container process but does not remove the container from the docker host. -func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { - query := url.Values{} - query.Set("signal", signal) - - resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/container_kill_test.go b/vendor/github.com/docker/engine-api/client/container_kill_test.go deleted file mode 100644 index 5697c58fc72..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_kill_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "testing" - - "golang.org/x/net/context" -) - -func TestContainerKillError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerKill(context.Background(), "nothing", "SIGKILL") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerKill(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - signal := req.URL.Query().Get("signal") - if signal != "SIGKILL" { - return nil, fmt.Errorf("signal not set in URL query properly. 
Expected 'SIGKILL', got %s", signal) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.ContainerKill(context.Background(), "container_id", "SIGKILL") - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_list.go b/vendor/github.com/docker/engine-api/client/container_list.go deleted file mode 100644 index 87f7333dc7b..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_list.go +++ /dev/null @@ -1,56 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - "strconv" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "golang.org/x/net/context" -) - -// ContainerList returns the list of containers in the docker host. -func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { - query := url.Values{} - - if options.All { - query.Set("all", "1") - } - - if options.Limit != -1 { - query.Set("limit", strconv.Itoa(options.Limit)) - } - - if options.Since != "" { - query.Set("since", options.Since) - } - - if options.Before != "" { - query.Set("before", options.Before) - } - - if options.Size { - query.Set("size", "1") - } - - if options.Filter.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filter) - - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/containers/json", query, nil) - if err != nil { - return nil, err - } - - var containers []types.Container - err = json.NewDecoder(resp.body).Decode(&containers) - ensureReaderClosed(resp) - return containers, err -} diff --git a/vendor/github.com/docker/engine-api/client/container_list_test.go b/vendor/github.com/docker/engine-api/client/container_list_test.go deleted file mode 100644 index 9eacbb0274e..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_list_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "golang.org/x/net/context" -) - -func TestContainerListError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerList(context.Background(), types.ContainerListOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerList(t *testing.T) { - expectedURL := "/containers/json" - expectedFilters := `{"before":{"container":true},"label":{"label1":true,"label2":true}}` - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - all := query.Get("all") - if all != "1" { - return nil, fmt.Errorf("all not set in URL query properly. Expected '1', got %s", all) - } - limit := query.Get("limit") - if limit != "0" { - return nil, fmt.Errorf("limit should have not be present in query. Expected '0', got %s", limit) - } - since := query.Get("since") - if since != "container" { - return nil, fmt.Errorf("since not set in URL query properly. 
Expected 'container', got %s", since) - } - before := query.Get("before") - if before != "" { - return nil, fmt.Errorf("before should have not be present in query, go %s", before) - } - size := query.Get("size") - if size != "1" { - return nil, fmt.Errorf("size not set in URL query properly. Expected '1', got %s", size) - } - filters := query.Get("filters") - if filters != expectedFilters { - return nil, fmt.Errorf("expected filters incoherent '%v' with actual filters %v", expectedFilters, filters) - } - - b, err := json.Marshal([]types.Container{ - { - ID: "container_id1", - }, - { - ID: "container_id2", - }, - }) - if err != nil { - return nil, err - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - filters := filters.NewArgs() - filters.Add("label", "label1") - filters.Add("label", "label2") - filters.Add("before", "container") - containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{ - Size: true, - All: true, - Since: "container", - Filter: filters, - }) - if err != nil { - t.Fatal(err) - } - if len(containers) != 2 { - t.Fatalf("expected 2 containers, got %v", containers) - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_logs.go b/vendor/github.com/docker/engine-api/client/container_logs.go deleted file mode 100644 index 08b9b918764..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_logs.go +++ /dev/null @@ -1,52 +0,0 @@ -package client - -import ( - "io" - "net/url" - "time" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" - timetypes "github.com/docker/engine-api/types/time" -) - -// ContainerLogs returns the logs generated by a container in an io.ReadCloser. -// It's up to the caller to close the stream. 
-func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/engine-api/client/container_logs_test.go b/vendor/github.com/docker/engine-api/client/container_logs_test.go deleted file mode 100644 index 0133a7487b3..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_logs_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package client - -import ( - "io" - "log" - "net/http" - "os" - "testing" - "time" - - "github.com/docker/engine-api/types" - - "golang.org/x/net/context" -) - -func TestContainerLogsError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func ExampleClient_ContainerLogs_withTimeout() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - client, _ := NewEnvClient() - reader, err := client.ContainerLogs(ctx, "container_id", types.ContainerLogsOptions{}) - if err != nil { - log.Fatal(err) - } - - _, err = io.Copy(os.Stdout, reader) - if err != nil && err != io.EOF { - log.Fatal(err) - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_pause.go b/vendor/github.com/docker/engine-api/client/container_pause.go deleted file mode 100644 index 412067a7821..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_pause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "golang.org/x/net/context" - -// ContainerPause pauses the main process of a given container without terminating it. 
-func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/container_pause_test.go b/vendor/github.com/docker/engine-api/client/container_pause_test.go deleted file mode 100644 index ebd12a6ac77..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_pause_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestContainerPauseError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerPause(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerPause(t *testing.T) { - expectedURL := "/containers/container_id/pause" - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - err := client.ContainerPause(context.Background(), "container_id") - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_remove.go b/vendor/github.com/docker/engine-api/client/container_remove.go deleted file mode 100644 index cef4b812208..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_remove.go +++ /dev/null @@ -1,27 +0,0 @@ -package client - -import ( - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerRemove kills and removes a container from the docker host. 
-func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { - query := url.Values{} - if options.RemoveVolumes { - query.Set("v", "1") - } - if options.RemoveLinks { - query.Set("link", "1") - } - - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/container_remove_test.go b/vendor/github.com/docker/engine-api/client/container_remove_test.go deleted file mode 100644 index 14e76989aa1..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_remove_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func TestContainerRemoveError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerRemove(t *testing.T) { - expectedURL := "/containers/container_id" - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - volume := query.Get("v") - if volume != "1" { - return nil, fmt.Errorf("v (volume) not set in URL query properly. Expected '1', got %s", volume) - } - force := query.Get("force") - if force != "1" { - return nil, fmt.Errorf("force not set in URL query properly. Expected '1', got %s", force) - } - link := query.Get("link") - if link != "" { - return nil, fmt.Errorf("link should have not be present in query, go %s", link) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{ - RemoveVolumes: true, - Force: true, - }) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_rename.go b/vendor/github.com/docker/engine-api/client/container_rename.go deleted file mode 100644 index 0e718da7c6e..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_rename.go +++ /dev/null @@ -1,16 +0,0 @@ -package client - -import ( - "net/url" - - "golang.org/x/net/context" -) - -// ContainerRename changes the name of a given container. 
-func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { - query := url.Values{} - query.Set("name", newContainerName) - resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/container_rename_test.go b/vendor/github.com/docker/engine-api/client/container_rename_test.go deleted file mode 100644 index 9344bab7db3..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_rename_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestContainerRenameError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerRename(context.Background(), "nothing", "newNothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerRename(t *testing.T) { - expectedURL := "/containers/container_id/rename" - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - name := req.URL.Query().Get("name") - if name != "newName" { - return nil, fmt.Errorf("name not set in URL query properly. Expected 'newName', got %s", name) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.ContainerRename(context.Background(), "container_id", "newName") - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_resize.go b/vendor/github.com/docker/engine-api/client/container_resize.go deleted file mode 100644 index b95d26b335a..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_resize.go +++ /dev/null @@ -1,29 +0,0 @@ -package client - -import ( - "net/url" - "strconv" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerResize changes the size of the tty for a container. -func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) -} - -// ContainerExecResize changes the size of the tty for an exec process running inside a container. 
-func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) -} - -func (cli *Client) resize(ctx context.Context, basePath string, height, width int) error { - query := url.Values{} - query.Set("h", strconv.Itoa(height)) - query.Set("w", strconv.Itoa(width)) - - resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/container_resize_test.go b/vendor/github.com/docker/engine-api/client/container_resize_test.go deleted file mode 100644 index cdc157cf208..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_resize_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func TestContainerResizeError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerResize(context.Background(), "container_id", types.ResizeOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerExecResizeError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerExecResize(context.Background(), "exec_id", types.ResizeOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerResize(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, resizeTransport("/containers/container_id/resize")), - } - - err := client.ContainerResize(context.Background(), "container_id", types.ResizeOptions{ - Height: 500, - Width: 600, - }) - if err != nil { - t.Fatal(err) - } -} - -func TestContainerExecResize(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, resizeTransport("/exec/exec_id/resize")), - } - - err := client.ContainerExecResize(context.Background(), "exec_id", types.ResizeOptions{ - Height: 500, - Width: 600, - }) - if err != nil { - t.Fatal(err) - } -} - -func resizeTransport(expectedURL string) func(req *http.Request) (*http.Response, error) { - return func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - h := query.Get("h") - if h != "500" { - return nil, fmt.Errorf("h not set in URL query properly. Expected '500', got %s", h) - } - w := query.Get("w") - if w != "600" { - return nil, fmt.Errorf("w not set in URL query properly. Expected '600', got %s", w) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_restart.go b/vendor/github.com/docker/engine-api/client/container_restart.go deleted file mode 100644 index 1c74b18ca51..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_restart.go +++ /dev/null @@ -1,19 +0,0 @@ -package client - -import ( - "net/url" - "strconv" - - "golang.org/x/net/context" -) - -// ContainerRestart stops and starts a container again. 
-// It makes the daemon to wait for the container to be up again for -// a specific amount of time, given the timeout. -func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout int) error { - query := url.Values{} - query.Set("t", strconv.Itoa(timeout)) - resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/container_restart_test.go b/vendor/github.com/docker/engine-api/client/container_restart_test.go deleted file mode 100644 index 8da73b0fe3b..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_restart_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestContainerRestartError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerRestart(context.Background(), "nothing", 0) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerRestart(t *testing.T) { - expectedURL := "/containers/container_id/restart" - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - t := req.URL.Query().Get("t") - if t != "100" { - return nil, fmt.Errorf("t (timeout) not set in URL query properly. Expected '100', got %s", t) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.ContainerRestart(context.Background(), "container_id", 100) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_start.go b/vendor/github.com/docker/engine-api/client/container_start.go deleted file mode 100644 index 12a979422ef..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_start.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "golang.org/x/net/context" - -// ContainerStart sends a request to the docker daemon to start a container. 
-func (cli *Client) ContainerStart(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/start", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/container_start_test.go b/vendor/github.com/docker/engine-api/client/container_start_test.go deleted file mode 100644 index d88dc543d9d..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_start_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "testing" - - "golang.org/x/net/context" -) - -func TestContainerStartError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerStart(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerStart(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - // we're not expecting any payload, but if one is supplied, check it is valid. - if req.Header.Get("Content-Type") == "application/json" { - var startConfig interface{} - if err := json.NewDecoder(req.Body).Decode(&startConfig); err != nil { - return nil, fmt.Errorf("Unable to parse json: %s", err) - } - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.ContainerStart(context.Background(), "container_id") - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_stats.go b/vendor/github.com/docker/engine-api/client/container_stats.go deleted file mode 100644 index 2cc67c3af17..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_stats.go +++ /dev/null @@ -1,24 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "golang.org/x/net/context" -) - -// ContainerStats returns near realtime stats for a given container. -// It's up to the caller to close the io.ReadCloser returned. 
-func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error) { - query := url.Values{} - query.Set("stream", "0") - if stream { - query.Set("stream", "1") - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) - if err != nil { - return nil, err - } - return resp.body, err -} diff --git a/vendor/github.com/docker/engine-api/client/container_stats_test.go b/vendor/github.com/docker/engine-api/client/container_stats_test.go deleted file mode 100644 index c57ead0fc27..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_stats_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package client - -import ( - "net/http" - "testing" - - "golang.org/x/net/context" -) - -func TestContainerStatsError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerStats(context.Background(), "nothing", false) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_stop.go b/vendor/github.com/docker/engine-api/client/container_stop.go deleted file mode 100644 index 34d786291d5..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_stop.go +++ /dev/null @@ -1,18 +0,0 @@ -package client - -import ( - "net/url" - "strconv" - - "golang.org/x/net/context" -) - -// ContainerStop stops a container without terminating the process. -// The process is blocked until the container stops or the timeout expires. -func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout int) error { - query := url.Values{} - query.Set("t", strconv.Itoa(timeout)) - resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/container_stop_test.go b/vendor/github.com/docker/engine-api/client/container_stop_test.go deleted file mode 100644 index 954aa5079e0..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_stop_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "testing" - - "golang.org/x/net/context" -) - -func TestContainerStopError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerStop(context.Background(), "nothing", 0) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerStop(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - t := req.URL.Query().Get("t") - if t != "100" { - return nil, fmt.Errorf("t (timeout) not set in URL query properly. 
Expected '100', got %s", t) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.ContainerStop(context.Background(), "container_id", 100) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_top.go b/vendor/github.com/docker/engine-api/client/container_top.go deleted file mode 100644 index 5ad926ae088..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_top.go +++ /dev/null @@ -1,28 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - "strings" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerTop shows process information from within a container. -func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (types.ContainerProcessList, error) { - var response types.ContainerProcessList - query := url.Values{} - if len(arguments) > 0 { - query.Set("ps_args", strings.Join(arguments, " ")) - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/engine-api/client/container_top_test.go b/vendor/github.com/docker/engine-api/client/container_top_test.go deleted file mode 100644 index 22f3bd9dad3..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_top_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "reflect" - "testing" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func TestContainerTopError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ContainerTop(context.Background(), "nothing", []string{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerTop(t *testing.T) { - expectedProcesses := [][]string{ - {"p1", "p2"}, - {"p3"}, - } - expectedTitles := []string{"title1", "title2"} - - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - query := req.URL.Query() - args := query.Get("ps_args") - if args != "arg1 arg2" { - return nil, fmt.Errorf("args not set in URL query properly. 
Expected 'arg1 arg2', got %v", args) - } - - b, err := json.Marshal(types.ContainerProcessList{ - Processes: [][]string{ - {"p1", "p2"}, - {"p3"}, - }, - Titles: []string{"title1", "title2"}, - }) - if err != nil { - return nil, err - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - processList, err := client.ContainerTop(context.Background(), "container_id", []string{"arg1", "arg2"}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(expectedProcesses, processList.Processes) { - t.Fatalf("Processes: expected %v, got %v", expectedProcesses, processList.Processes) - } - if !reflect.DeepEqual(expectedTitles, processList.Titles) { - t.Fatalf("Titles: expected %v, got %v", expectedTitles, processList.Titles) - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_unpause.go b/vendor/github.com/docker/engine-api/client/container_unpause.go deleted file mode 100644 index 5c76211256c..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_unpause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "golang.org/x/net/context" - -// ContainerUnpause resumes the process execution within a container -func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/container_unpause_test.go b/vendor/github.com/docker/engine-api/client/container_unpause_test.go deleted file mode 100644 index a5b21bf56c6..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_unpause_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestContainerUnpauseError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerUnpause(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerUnpause(t *testing.T) { - expectedURL := "/containers/container_id/unpause" - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - err := client.ContainerUnpause(context.Background(), "container_id") - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_update.go b/vendor/github.com/docker/engine-api/client/container_update.go deleted file mode 100644 index a5a1826dc4a..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_update.go +++ /dev/null @@ -1,13 +0,0 @@ -package client - -import ( - "github.com/docker/engine-api/types/container" - "golang.org/x/net/context" -) - -// ContainerUpdate updates resources of a container -func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) - ensureReaderClosed(resp) - return err -} 
diff --git a/vendor/github.com/docker/engine-api/client/container_update_test.go b/vendor/github.com/docker/engine-api/client/container_update_test.go deleted file mode 100644 index 2fb0a326923..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_update_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/engine-api/types/container" - "golang.org/x/net/context" -) - -func TestContainerUpdateError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - err := client.ContainerUpdate(context.Background(), "nothing", container.UpdateConfig{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestContainerUpdate(t *testing.T) { - expectedURL := "/containers/container_id/update" - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.ContainerUpdate(context.Background(), "container_id", container.UpdateConfig{ - Resources: container.Resources{ - CPUPeriod: 1, - }, - RestartPolicy: container.RestartPolicy{ - Name: "always", - }, - }) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/engine-api/client/container_wait.go b/vendor/github.com/docker/engine-api/client/container_wait.go deleted file mode 100644 index c26ff3f3786..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_wait.go +++ /dev/null @@ -1,26 +0,0 @@ -package client - -import ( - "encoding/json" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" -) - -// ContainerWait pauses execution until a container exits. -// It returns the API status code as response of its readiness. 
-func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int, error) { - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) - if err != nil { - return -1, err - } - defer ensureReaderClosed(resp) - - var res types.ContainerWaitResponse - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { - return -1, err - } - - return res.StatusCode, nil -} diff --git a/vendor/github.com/docker/engine-api/client/container_wait_test.go b/vendor/github.com/docker/engine-api/client/container_wait_test.go deleted file mode 100644 index c402ff874e7..00000000000 --- a/vendor/github.com/docker/engine-api/client/container_wait_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "log" - "net/http" - "strings" - "testing" - "time" - - "github.com/docker/engine-api/types" - - "golang.org/x/net/context" -) - -func TestContainerWaitError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - code, err := client.ContainerWait(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } - if code != -1 { - t.Fatalf("expected a status code equal to '-1', got %d", code) - } -} - -func TestContainerWait(t *testing.T) { - expectedURL := "/containers/container_id/wait" - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - b, err := json.Marshal(types.ContainerWaitResponse{ - StatusCode: 15, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - code, err := client.ContainerWait(context.Background(), "container_id") - if err != nil { - t.Fatal(err) - } - if code != 15 { - t.Fatalf("expected a status code equal to '15', got %d", code) - } -} - -func ExampleClient_ContainerWait_withTimeout() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - client, _ := NewEnvClient() - _, err := client.ContainerWait(ctx, "container_id") - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/docker/engine-api/client/errors.go b/vendor/github.com/docker/engine-api/client/errors.go deleted file mode 100644 index 17828bb7168..00000000000 --- a/vendor/github.com/docker/engine-api/client/errors.go +++ /dev/null @@ -1,94 +0,0 @@ -package client - -import ( - "errors" - "fmt" -) - -// ErrConnectionFailed is an error raised when the connection between the client and the server failed. -var ErrConnectionFailed = errors.New("Cannot connect to the Docker daemon. Is the docker daemon running on this host?") - -// imageNotFoundError implements an error returned when an image is not in the docker host. -type imageNotFoundError struct { - imageID string -} - -// Error returns a string representation of an imageNotFoundError -func (i imageNotFoundError) Error() string { - return fmt.Sprintf("Error: No such image: %s", i.imageID) -} - -// IsErrImageNotFound returns true if the error is caused -// when an image is not found in the docker host. 
-func IsErrImageNotFound(err error) bool { - _, ok := err.(imageNotFoundError) - return ok -} - -// containerNotFoundError implements an error returned when a container is not in the docker host. -type containerNotFoundError struct { - containerID string -} - -// Error returns a string representation of a containerNotFoundError -func (e containerNotFoundError) Error() string { - return fmt.Sprintf("Error: No such container: %s", e.containerID) -} - -// IsErrContainerNotFound returns true if the error is caused -// when a container is not found in the docker host. -func IsErrContainerNotFound(err error) bool { - _, ok := err.(containerNotFoundError) - return ok -} - -// networkNotFoundError implements an error returned when a network is not in the docker host. -type networkNotFoundError struct { - networkID string -} - -// Error returns a string representation of a networkNotFoundError -func (e networkNotFoundError) Error() string { - return fmt.Sprintf("Error: No such network: %s", e.networkID) -} - -// IsErrNetworkNotFound returns true if the error is caused -// when a network is not found in the docker host. -func IsErrNetworkNotFound(err error) bool { - _, ok := err.(networkNotFoundError) - return ok -} - -// volumeNotFoundError implements an error returned when a volume is not in the docker host. -type volumeNotFoundError struct { - volumeID string -} - -// Error returns a string representation of a networkNotFoundError -func (e volumeNotFoundError) Error() string { - return fmt.Sprintf("Error: No such volume: %s", e.volumeID) -} - -// IsErrVolumeNotFound returns true if the error is caused -// when a volume is not found in the docker host. -func IsErrVolumeNotFound(err error) bool { - _, ok := err.(volumeNotFoundError) - return ok -} - -// unauthorizedError represents an authorization error in a remote registry. -type unauthorizedError struct { - cause error -} - -// Error returns a string representation of an unauthorizedError -func (u unauthorizedError) Error() string { - return u.cause.Error() -} - -// IsErrUnauthorized returns true if the error is caused -// when a remote registry authentication fails -func IsErrUnauthorized(err error) bool { - _, ok := err.(unauthorizedError) - return ok -} diff --git a/vendor/github.com/docker/engine-api/client/events.go b/vendor/github.com/docker/engine-api/client/events.go deleted file mode 100644 index e379ce0a294..00000000000 --- a/vendor/github.com/docker/engine-api/client/events.go +++ /dev/null @@ -1,48 +0,0 @@ -package client - -import ( - "io" - "net/url" - "time" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - timetypes "github.com/docker/engine-api/types/time" -) - -// Events returns a stream of events in the daemon in a ReadCloser. -// It's up to the caller to close the stream. 
-func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error) { - query := url.Values{} - ref := time.Now() - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, ref) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - if options.Until != "" { - ts, err := timetypes.GetTimestamp(options.Until, ref) - if err != nil { - return nil, err - } - query.Set("until", ts) - } - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) - if err != nil { - return nil, err - } - query.Set("filters", filterJSON) - } - - serverResponse, err := cli.get(ctx, "/events", query, nil) - if err != nil { - return nil, err - } - return serverResponse.body, nil -} diff --git a/vendor/github.com/docker/engine-api/client/hijack.go b/vendor/github.com/docker/engine-api/client/hijack.go deleted file mode 100644 index dbd91ef6299..00000000000 --- a/vendor/github.com/docker/engine-api/client/hijack.go +++ /dev/null @@ -1,174 +0,0 @@ -package client - -import ( - "crypto/tls" - "errors" - "fmt" - "net" - "net/http/httputil" - "net/url" - "strings" - "time" - - "github.com/docker/engine-api/types" - "github.com/docker/go-connections/sockets" - "golang.org/x/net/context" -) - -// tlsClientCon holds tls information and a dialed connection. -type tlsClientCon struct { - *tls.Conn - rawConn net.Conn -} - -func (c *tlsClientCon) CloseWrite() error { - // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it - // on its underlying connection. - if conn, ok := c.rawConn.(types.CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - -// postHijacked sends a POST request and hijacks the connection. -func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { - bodyEncoded, err := encodeData(body) - if err != nil { - return types.HijackedResponse{}, err - } - - req, err := cli.newRequest("POST", path, query, bodyEncoded, headers) - if err != nil { - return types.HijackedResponse{}, err - } - req.Host = cli.addr - - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", "tcp") - - conn, err := dial(cli.proto, cli.addr, cli.transport.TLSConfig()) - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") - } - return types.HijackedResponse{}, err - } - - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. 
Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := conn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - clientconn := httputil.NewClientConn(conn, nil) - defer clientconn.Close() - - // Server hijacks the connection, error 'connection closed' expected - _, err = clientconn.Do(req) - - rwc, br := clientconn.Hijack() - - return types.HijackedResponse{Conn: rwc, Reader: br}, err -} - -func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { - return tlsDialWithDialer(new(net.Dialer), network, addr, config) -} - -// We need to copy Go's implementation of tls.Dial (pkg/cryptor/tls/tls.go) in -// order to return our custom tlsClientCon struct which holds both the tls.Conn -// object _and_ its underlying raw connection. The rationale for this is that -// we need to be able to close the write end of the connection when attaching, -// which tls.Conn does not provide. -func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { - // We want the Timeout and Deadline values from dialer to cover the - // whole process: TCP connection and TLS handshake. This means that we - // also need to start our own timers now. - timeout := dialer.Timeout - - if !dialer.Deadline.IsZero() { - deadlineTimeout := dialer.Deadline.Sub(time.Now()) - if timeout == 0 || deadlineTimeout < timeout { - timeout = deadlineTimeout - } - } - - var errChannel chan error - - if timeout != 0 { - errChannel = make(chan error, 2) - time.AfterFunc(timeout, func() { - errChannel <- errors.New("") - }) - } - - proxyDialer, err := sockets.DialerFromEnvironment(dialer) - if err != nil { - return nil, err - } - - rawConn, err := proxyDialer.Dial(network, addr) - if err != nil { - return nil, err - } - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := rawConn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - hostname := addr[:colonPos] - - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - if config.ServerName == "" { - // Make a copy to avoid polluting argument or default. - c := *config - c.ServerName = hostname - config = &c - } - - conn := tls.Client(rawConn, config) - - if timeout == 0 { - err = conn.Handshake() - } else { - go func() { - errChannel <- conn.Handshake() - }() - - err = <-errChannel - } - - if err != nil { - rawConn.Close() - return nil, err - } - - // This is Docker difference with standard's crypto/tls package: returned a - // wrapper which holds both the TLS and raw connections. 
- return &tlsClientCon{conn, rawConn}, nil -} - -func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { - if tlsConfig != nil && proto != "unix" && proto != "npipe" { - // Notice this isn't Go standard's tls.Dial function - return tlsDial(proto, addr, tlsConfig) - } - if proto == "npipe" { - return sockets.DialPipe(addr, 32*time.Second) - } - return net.Dial(proto, addr) -} diff --git a/vendor/github.com/docker/engine-api/client/image_build.go b/vendor/github.com/docker/engine-api/client/image_build.go deleted file mode 100644 index 4165c4e9a84..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_build.go +++ /dev/null @@ -1,135 +0,0 @@ -package client - -import ( - "encoding/base64" - "encoding/json" - "io" - "net/http" - "net/url" - "regexp" - "strconv" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" -) - -var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) - -// ImageBuild sends request to the daemon to build images. -// The Body in the response implement an io.ReadCloser and it's up to the caller to -// close it. -func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { - query, err := imageBuildOptionsToQuery(options) - if err != nil { - return types.ImageBuildResponse{}, err - } - - headers := http.Header(make(map[string][]string)) - buf, err := json.Marshal(options.AuthConfigs) - if err != nil { - return types.ImageBuildResponse{}, err - } - headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) - headers.Set("Content-Type", "application/tar") - - serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) - if err != nil { - return types.ImageBuildResponse{}, err - } - - osType := getDockerOS(serverResp.header.Get("Server")) - - return types.ImageBuildResponse{ - Body: serverResp.body, - OSType: osType, - }, nil -} - -func imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { - query := url.Values{ - "t": options.Tags, - } - if options.SuppressOutput { - query.Set("q", "1") - } - if options.RemoteContext != "" { - query.Set("remote", options.RemoteContext) - } - if options.NoCache { - query.Set("nocache", "1") - } - if options.Remove { - query.Set("rm", "1") - } else { - query.Set("rm", "0") - } - - if options.ForceRemove { - query.Set("forcerm", "1") - } - - if options.PullParent { - query.Set("pull", "1") - } - - if !container.Isolation.IsDefault(options.Isolation) { - query.Set("isolation", string(options.Isolation)) - } - - query.Set("cpusetcpus", options.CPUSetCPUs) - query.Set("cpusetmems", options.CPUSetMems) - query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) - query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) - query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) - query.Set("memory", strconv.FormatInt(options.Memory, 10)) - query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) - query.Set("cgroupparent", options.CgroupParent) - query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) - query.Set("dockerfile", options.Dockerfile) - - ulimitsJSON, err := json.Marshal(options.Ulimits) - if err != nil { - return query, err - } - query.Set("ulimits", string(ulimitsJSON)) - - buildArgsJSON, err := json.Marshal(options.BuildArgs) - if err != nil { - return query, err - } - query.Set("buildargs", string(buildArgsJSON)) - - labelsJSON, err := 
json.Marshal(options.Labels) - if err != nil { - return query, err - } - query.Set("labels", string(labelsJSON)) - return query, nil -} - -func getDockerOS(serverHeader string) string { - var osType string - matches := headerRegexp.FindStringSubmatch(serverHeader) - if len(matches) > 0 { - osType = matches[1] - } - return osType -} - -// convertKVStringsToMap converts ["key=value"] to {"key":"value"} -func convertKVStringsToMap(values []string) map[string]string { - result := make(map[string]string, len(values)) - for _, value := range values { - kv := strings.SplitN(value, "=", 2) - if len(kv) == 1 { - result[kv[0]] = "" - } else { - result[kv[0]] = kv[1] - } - } - - return result -} diff --git a/vendor/github.com/docker/engine-api/client/image_build_test.go b/vendor/github.com/docker/engine-api/client/image_build_test.go deleted file mode 100644 index caa1ebb74d5..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_build_test.go +++ /dev/null @@ -1,230 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "reflect" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/go-units" -) - -func TestImageBuildError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ImageBuild(context.Background(), nil, types.ImageBuildOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestImageBuild(t *testing.T) { - emptyRegistryConfig := "bnVsbA==" - buildCases := []struct { - buildOptions types.ImageBuildOptions - expectedQueryParams map[string]string - expectedTags []string - expectedRegistryConfig string - }{ - { - buildOptions: types.ImageBuildOptions{ - SuppressOutput: true, - NoCache: true, - Remove: true, - ForceRemove: true, - PullParent: true, - }, - expectedQueryParams: map[string]string{ - "q": "1", - "nocache": "1", - "rm": "1", - "forcerm": "1", - "pull": "1", - }, - expectedTags: []string{}, - expectedRegistryConfig: emptyRegistryConfig, - }, - { - buildOptions: types.ImageBuildOptions{ - SuppressOutput: false, - NoCache: false, - Remove: false, - ForceRemove: false, - PullParent: false, - }, - expectedQueryParams: map[string]string{ - "q": "", - "nocache": "", - "rm": "0", - "forcerm": "", - "pull": "", - }, - expectedTags: []string{}, - expectedRegistryConfig: emptyRegistryConfig, - }, - { - buildOptions: types.ImageBuildOptions{ - RemoteContext: "remoteContext", - Isolation: container.Isolation("isolation"), - CPUSetCPUs: "2", - CPUSetMems: "12", - CPUShares: 20, - CPUQuota: 10, - CPUPeriod: 30, - Memory: 256, - MemorySwap: 512, - ShmSize: 10, - CgroupParent: "cgroup_parent", - Dockerfile: "Dockerfile", - }, - expectedQueryParams: map[string]string{ - "remote": "remoteContext", - "isolation": "isolation", - "cpusetcpus": "2", - "cpusetmems": "12", - "cpushares": "20", - "cpuquota": "10", - "cpuperiod": "30", - "memory": "256", - "memswap": "512", - "shmsize": "10", - "cgroupparent": "cgroup_parent", - "dockerfile": "Dockerfile", - "rm": "0", - }, - expectedTags: []string{}, - expectedRegistryConfig: emptyRegistryConfig, - }, - { - buildOptions: types.ImageBuildOptions{ - BuildArgs: map[string]string{ - "ARG1": "value1", - "ARG2": "value2", - }, - }, - expectedQueryParams: map[string]string{ - "buildargs": 
`{"ARG1":"value1","ARG2":"value2"}`, - "rm": "0", - }, - expectedTags: []string{}, - expectedRegistryConfig: emptyRegistryConfig, - }, - { - buildOptions: types.ImageBuildOptions{ - Ulimits: []*units.Ulimit{ - { - Name: "nproc", - Hard: 65557, - Soft: 65557, - }, - { - Name: "nofile", - Hard: 20000, - Soft: 40000, - }, - }, - }, - expectedQueryParams: map[string]string{ - "ulimits": `[{"Name":"nproc","Hard":65557,"Soft":65557},{"Name":"nofile","Hard":20000,"Soft":40000}]`, - "rm": "0", - }, - expectedTags: []string{}, - expectedRegistryConfig: emptyRegistryConfig, - }, - { - buildOptions: types.ImageBuildOptions{ - AuthConfigs: map[string]types.AuthConfig{ - "https://index.docker.io/v1/": { - Auth: "dG90bwo=", - }, - }, - }, - expectedQueryParams: map[string]string{ - "rm": "0", - }, - expectedTags: []string{}, - expectedRegistryConfig: "eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289In19", - }, - } - for _, buildCase := range buildCases { - expectedURL := "/build" - client := &Client{ - transport: newMockClient(nil, func(r *http.Request) (*http.Response, error) { - if !strings.HasPrefix(r.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) - } - // Check request headers - registryConfig := r.Header.Get("X-Registry-Config") - if registryConfig != buildCase.expectedRegistryConfig { - return nil, fmt.Errorf("X-Registry-Config header not properly set in the request. Expected '%s', got %s", buildCase.expectedRegistryConfig, registryConfig) - } - contentType := r.Header.Get("Content-Type") - if contentType != "application/tar" { - return nil, fmt.Errorf("Content-type header not properly set in the request. Expected 'application/tar', got %s", contentType) - } - - // Check query parameters - query := r.URL.Query() - for key, expected := range buildCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) - } - } - - // Check tags - if len(buildCase.expectedTags) > 0 { - tags := query["t"] - if !reflect.DeepEqual(tags, buildCase.expectedTags) { - return nil, fmt.Errorf("t (tags) not set in URL query properly. 
Expected '%s', got %s", buildCase.expectedTags, tags) - } - } - - headers := http.Header{} - headers.Add("Server", "Docker/v1.23 (MyOS)") - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), - Header: headers, - }, nil - }), - } - buildResponse, err := client.ImageBuild(context.Background(), nil, buildCase.buildOptions) - if err != nil { - t.Fatal(err) - } - if buildResponse.OSType != "MyOS" { - t.Fatalf("expected OSType to be 'MyOS', got %s", buildResponse.OSType) - } - response, err := ioutil.ReadAll(buildResponse.Body) - if err != nil { - t.Fatal(err) - } - buildResponse.Body.Close() - if string(response) != "body" { - t.Fatalf("expected Body to contain 'body' string, got %s", response) - } - } -} - -func TestGetDockerOS(t *testing.T) { - cases := map[string]string{ - "Docker/v1.22 (linux)": "linux", - "Docker/v1.22 (windows)": "windows", - "Foo/v1.22 (bar)": "", - } - for header, os := range cases { - g := getDockerOS(header) - if g != os { - t.Fatalf("Expected %s, got %s", os, g) - } - } -} diff --git a/vendor/github.com/docker/engine-api/client/image_create.go b/vendor/github.com/docker/engine-api/client/image_create.go deleted file mode 100644 index 6dfc0391c00..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_create.go +++ /dev/null @@ -1,34 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/reference" -) - -// ImageCreate creates a new image based in the parent options. -// It returns the JSON content in the response body. -func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { - repository, tag, err := reference.Parse(parentReference) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", repository) - query.Set("tag", tag) - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/images/create", query, nil, headers) -} diff --git a/vendor/github.com/docker/engine-api/client/image_create_test.go b/vendor/github.com/docker/engine-api/client/image_create_test.go deleted file mode 100644 index 94e19d89bbb..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_create_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" -) - -func TestImageCreateError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ImageCreate(context.Background(), "reference", types.ImageCreateOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server error, got %v", err) - } -} - -func TestImageCreate(t *testing.T) { - expectedURL := "/images/create" - expectedImage := "test:5000/my_image" - expectedTag := "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" - expectedReference := fmt.Sprintf("%s@%s", expectedImage, expectedTag) - expectedRegistryAuth := 
"eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289IiwiZW1haWwiOiJqb2huQGRvZS5jb20ifX0=" - client := &Client{ - transport: newMockClient(nil, func(r *http.Request) (*http.Response, error) { - if !strings.HasPrefix(r.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) - } - registryAuth := r.Header.Get("X-Registry-Auth") - if registryAuth != expectedRegistryAuth { - return nil, fmt.Errorf("X-Registry-Auth header not properly set in the request. Expected '%s', got %s", expectedRegistryAuth, registryAuth) - } - - query := r.URL.Query() - fromImage := query.Get("fromImage") - if fromImage != expectedImage { - return nil, fmt.Errorf("fromImage not set in URL query properly. Expected '%s', got %s", expectedImage, fromImage) - } - - tag := query.Get("tag") - if tag != expectedTag { - return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", expectedTag, tag) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), - }, nil - }), - } - - createResponse, err := client.ImageCreate(context.Background(), expectedReference, types.ImageCreateOptions{ - RegistryAuth: expectedRegistryAuth, - }) - if err != nil { - t.Fatal(err) - } - response, err := ioutil.ReadAll(createResponse) - if err != nil { - t.Fatal(err) - } - if err = createResponse.Close(); err != nil { - t.Fatal(err) - } - if string(response) != "body" { - t.Fatalf("expected Body to contain 'body' string, got %s", response) - } -} diff --git a/vendor/github.com/docker/engine-api/client/image_history.go b/vendor/github.com/docker/engine-api/client/image_history.go deleted file mode 100644 index b2840b5ed84..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_history.go +++ /dev/null @@ -1,22 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ImageHistory returns the changes in an image in history format. 
-func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]types.ImageHistory, error) { - var history []types.ImageHistory - serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) - if err != nil { - return history, err - } - - err = json.NewDecoder(serverResp.body).Decode(&history) - ensureReaderClosed(serverResp) - return history, err -} diff --git a/vendor/github.com/docker/engine-api/client/image_history_test.go b/vendor/github.com/docker/engine-api/client/image_history_test.go deleted file mode 100644 index d3c61e49a53..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_history_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func TestImageHistoryError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ImageHistory(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server error, got %v", err) - } -} - -func TestImageHistory(t *testing.T) { - expectedURL := "/images/image_id/history" - client := &Client{ - transport: newMockClient(nil, func(r *http.Request) (*http.Response, error) { - if !strings.HasPrefix(r.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) - } - b, err := json.Marshal([]types.ImageHistory{ - { - ID: "image_id1", - Tags: []string{"tag1", "tag2"}, - }, - { - ID: "image_id2", - Tags: []string{"tag1", "tag2"}, - }, - }) - if err != nil { - return nil, err - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - imageHistories, err := client.ImageHistory(context.Background(), "image_id") - if err != nil { - t.Fatal(err) - } - if len(imageHistories) != 2 { - t.Fatalf("expected 2 containers, got %v", imageHistories) - } -} diff --git a/vendor/github.com/docker/engine-api/client/image_import.go b/vendor/github.com/docker/engine-api/client/image_import.go deleted file mode 100644 index 4e8749a01d5..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_import.go +++ /dev/null @@ -1,37 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "golang.org/x/net/context" - - "github.com/docker/distribution/reference" - "github.com/docker/engine-api/types" -) - -// ImageImport creates a new image based in the source options. -// It returns the JSON content in the response body. 
-func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { - if ref != "" { - //Check if the given image name can be resolved - if _, err := reference.ParseNamed(ref); err != nil { - return nil, err - } - } - - query := url.Values{} - query.Set("fromSrc", source.SourceName) - query.Set("repo", ref) - query.Set("tag", options.Tag) - query.Set("message", options.Message) - for _, change := range options.Changes { - query.Add("changes", change) - } - - resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/engine-api/client/image_import_test.go b/vendor/github.com/docker/engine-api/client/image_import_test.go deleted file mode 100644 index b985f188f50..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_import_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "reflect" - "strings" - "testing" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func TestImageImportError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ImageImport(context.Background(), types.ImageImportSource{}, "image:tag", types.ImageImportOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server error, got %v", err) - } -} - -func TestImageImport(t *testing.T) { - expectedURL := "/images/create" - client := &Client{ - transport: newMockClient(nil, func(r *http.Request) (*http.Response, error) { - if !strings.HasPrefix(r.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) - } - query := r.URL.Query() - fromSrc := query.Get("fromSrc") - if fromSrc != "image_source" { - return nil, fmt.Errorf("fromSrc not set in URL query properly. Expected 'image_source', got %s", fromSrc) - } - repo := query.Get("repo") - if repo != "repository_name:imported" { - return nil, fmt.Errorf("repo not set in URL query properly. Expected 'repository_name', got %s", repo) - } - tag := query.Get("tag") - if tag != "imported" { - return nil, fmt.Errorf("tag not set in URL query properly. Expected 'imported', got %s", tag) - } - message := query.Get("message") - if message != "A message" { - return nil, fmt.Errorf("message not set in URL query properly. Expected 'A message', got %s", message) - } - changes := query["changes"] - expectedChanges := []string{"change1", "change2"} - if !reflect.DeepEqual(expectedChanges, changes) { - return nil, fmt.Errorf("changes not set in URL query properly. 
Expected %v, got %v", expectedChanges, changes) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), - }, nil - }), - } - importResponse, err := client.ImageImport(context.Background(), types.ImageImportSource{ - Source: strings.NewReader("source"), - SourceName: "image_source", - }, "repository_name:imported", types.ImageImportOptions{ - Tag: "imported", - Message: "A message", - Changes: []string{"change1", "change2"}, - }) - if err != nil { - t.Fatal(err) - } - response, err := ioutil.ReadAll(importResponse) - if err != nil { - t.Fatal(err) - } - importResponse.Close() - if string(response) != "response" { - t.Fatalf("expected response to contain 'response', got %s", string(response)) - } -} diff --git a/vendor/github.com/docker/engine-api/client/image_inspect.go b/vendor/github.com/docker/engine-api/client/image_inspect.go deleted file mode 100644 index 859ba640869..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_inspect.go +++ /dev/null @@ -1,38 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ImageInspectWithRaw returns the image information and its raw representation. -func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string, getSize bool) (types.ImageInspect, []byte, error) { - query := url.Values{} - if getSize { - query.Set("size", "1") - } - serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil) - if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ImageInspect{}, nil, imageNotFoundError{imageID} - } - return types.ImageInspect{}, nil, err - } - defer ensureReaderClosed(serverResp) - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return types.ImageInspect{}, nil, err - } - - var response types.ImageInspect - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/engine-api/client/image_inspect_test.go b/vendor/github.com/docker/engine-api/client/image_inspect_test.go deleted file mode 100644 index 65bf1b09d38..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_inspect_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "reflect" - "strings" - "testing" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func TestImageInspectError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - - _, _, err := client.ImageInspectWithRaw(context.Background(), "nothing", true) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestImageInspectImageNotFound(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusNotFound, "Server error")), - } - - _, _, err := client.ImageInspectWithRaw(context.Background(), "unknown", true) - if err == nil || !IsErrImageNotFound(err) { - t.Fatalf("expected a imageNotFound error, got %v", err) - } -} - -func TestImageInspect(t *testing.T) { - expectedURL := "/images/image_id/json" - expectedTags := []string{"tag1", "tag2"} - inspectCases := []struct { - size bool - expectedQueryParams map[string]string - }{ - { - size: true, - expectedQueryParams: 
map[string]string{ - "size": "1", - }, - }, - { - size: false, - expectedQueryParams: map[string]string{ - "size": "", - }, - }, - } - for _, inspectCase := range inspectCases { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - for key, expected := range inspectCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) - } - } - content, err := json.Marshal(types.ImageInspect{ - ID: "image_id", - RepoTags: expectedTags, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - imageInspect, _, err := client.ImageInspectWithRaw(context.Background(), "image_id", inspectCase.size) - if err != nil { - t.Fatal(err) - } - if imageInspect.ID != "image_id" { - t.Fatalf("expected `image_id`, got %s", imageInspect.ID) - } - if !reflect.DeepEqual(imageInspect.RepoTags, expectedTags) { - t.Fatalf("expected `%v`, got %v", expectedTags, imageInspect.RepoTags) - } - } -} diff --git a/vendor/github.com/docker/engine-api/client/image_list.go b/vendor/github.com/docker/engine-api/client/image_list.go deleted file mode 100644 index 347810e663d..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_list.go +++ /dev/null @@ -1,40 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "golang.org/x/net/context" -) - -// ImageList returns a list of images in the docker host. 
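To illustrate the ImageList call removed above, a short sketch that lists dangling images, assuming NewEnvClient (outside this diff); the filter key mirrors the encoding the list test below asserts on, and ID/RepoTags are the fields printed.

package main

import (
	"fmt"
	"log"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"github.com/docker/engine-api/types/filters"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient() // assumed constructor, not part of this diff
	if err != nil {
		log.Fatal(err)
	}

	// Restrict the listing to dangling images; this produces the same
	// {"dangling":{"true":true}} filters parameter the test cases check.
	args := filters.NewArgs()
	args.Add("dangling", "true")

	images, err := cli.ImageList(context.Background(), types.ImageListOptions{Filters: args})
	if err != nil {
		log.Fatal(err)
	}
	for _, img := range images {
		fmt.Println(img.ID, img.RepoTags)
	}
}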
-func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error) { - var images []types.Image - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) - if err != nil { - return images, err - } - query.Set("filters", filterJSON) - } - if options.MatchName != "" { - // FIXME rename this parameter, to not be confused with the filters flag - query.Set("filter", options.MatchName) - } - if options.All { - query.Set("all", "1") - } - - serverResp, err := cli.get(ctx, "/images/json", query, nil) - if err != nil { - return images, err - } - - err = json.NewDecoder(serverResp.body).Decode(&images) - ensureReaderClosed(serverResp) - return images, err -} diff --git a/vendor/github.com/docker/engine-api/client/image_list_test.go b/vendor/github.com/docker/engine-api/client/image_list_test.go deleted file mode 100644 index 6a8a2c50570..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_list_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "golang.org/x/net/context" -) - -func TestImageListError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.ImageList(context.Background(), types.ImageListOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestImageList(t *testing.T) { - expectedURL := "/images/json" - - noDanglingfilters := filters.NewArgs() - noDanglingfilters.Add("dangling", "false") - - filters := filters.NewArgs() - filters.Add("label", "label1") - filters.Add("label", "label2") - filters.Add("dangling", "true") - - listCases := []struct { - options types.ImageListOptions - expectedQueryParams map[string]string - }{ - { - options: types.ImageListOptions{}, - expectedQueryParams: map[string]string{ - "all": "", - "filter": "", - "filters": "", - }, - }, - { - options: types.ImageListOptions{ - All: true, - MatchName: "image_name", - }, - expectedQueryParams: map[string]string{ - "all": "1", - "filter": "image_name", - "filters": "", - }, - }, - { - options: types.ImageListOptions{ - Filters: filters, - }, - expectedQueryParams: map[string]string{ - "all": "", - "filter": "", - "filters": `{"dangling":{"true":true},"label":{"label1":true,"label2":true}}`, - }, - }, - { - options: types.ImageListOptions{ - Filters: noDanglingfilters, - }, - expectedQueryParams: map[string]string{ - "all": "", - "filter": "", - "filters": `{"dangling":{"false":true}}`, - }, - }, - } - for _, listCase := range listCases { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - for key, expected := range listCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) - } - } - content, err := json.Marshal([]types.Image{ - { - ID: "image_id2", - }, - { - ID: "image_id2", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - images, err := client.ImageList(context.Background(), listCase.options) - if err != nil { - t.Fatal(err) - } - if len(images) != 2 { - t.Fatalf("expected 2 images, got %v", images) - } - } -} diff --git a/vendor/github.com/docker/engine-api/client/image_load.go b/vendor/github.com/docker/engine-api/client/image_load.go deleted file mode 100644 index 72f55fdc015..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_load.go +++ /dev/null @@ -1,30 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" -) - -// ImageLoad loads an image in the docker host from the client host. -// It's up to the caller to close the io.ReadCloser in the -// ImageLoadResponse returned by this function. -func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { - v := url.Values{} - v.Set("quiet", "0") - if quiet { - v.Set("quiet", "1") - } - headers := map[string][]string{"Content-Type": {"application/x-tar"}} - resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) - if err != nil { - return types.ImageLoadResponse{}, err - } - return types.ImageLoadResponse{ - Body: resp.body, - JSON: resp.header.Get("Content-Type") == "application/json", - }, nil -} diff --git a/vendor/github.com/docker/engine-api/client/image_load_test.go b/vendor/github.com/docker/engine-api/client/image_load_test.go deleted file mode 100644 index 0ee7cf35a6f..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_load_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestImageLoadError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.ImageLoad(context.Background(), nil, true) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestImageLoad(t *testing.T) { - expectedURL := "/images/load" - expectedInput := "inputBody" - expectedOutput := "outputBody" - loadCases := []struct { - quiet bool - responseContentType string - expectedResponseJSON bool - expectedQueryParams map[string]string - }{ - { - quiet: false, - responseContentType: "text/plain", - expectedResponseJSON: false, - expectedQueryParams: map[string]string{ - "quiet": "0", - }, - }, - { - quiet: true, - responseContentType: "application/json", - expectedResponseJSON: true, - expectedQueryParams: map[string]string{ - "quiet": "1", - }, - }, - } - for _, loadCase := range loadCases { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - contentType := req.Header.Get("Content-Type") - if contentType != "application/x-tar" { - return nil, fmt.Errorf("content-type not set in URL headers properly. 
Expected 'application/x-tar', got %s", contentType) - } - query := req.URL.Query() - for key, expected := range loadCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) - } - } - headers := http.Header{} - headers.Add("Content-Type", loadCase.responseContentType) - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), - Header: headers, - }, nil - }), - } - - input := bytes.NewReader([]byte(expectedInput)) - imageLoadResponse, err := client.ImageLoad(context.Background(), input, loadCase.quiet) - if err != nil { - t.Fatal(err) - } - if imageLoadResponse.JSON != loadCase.expectedResponseJSON { - t.Fatalf("expected a JSON response, was not.") - } - body, err := ioutil.ReadAll(imageLoadResponse.Body) - if err != nil { - t.Fatal(err) - } - if string(body) != expectedOutput { - t.Fatalf("expected %s, got %s", expectedOutput, string(body)) - } - } -} diff --git a/vendor/github.com/docker/engine-api/client/image_pull.go b/vendor/github.com/docker/engine-api/client/image_pull.go deleted file mode 100644 index e2c49ec52b1..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_pull.go +++ /dev/null @@ -1,46 +0,0 @@ -package client - -import ( - "io" - "net/http" - "net/url" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/reference" -) - -// ImagePull requests the docker host to pull an image from a remote registry. -// It executes the privileged function if the operation is unauthorized -// and it tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly. 
-// -// FIXME(vdemeester): there is currently used in a few way in docker/docker -// - if not in trusted content, ref is used to pass the whole reference, and tag is empty -// - if in trusted content, ref is used to pass the reference name, and tag for the digest -func (cli *Client) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) { - repository, tag, err := reference.Parse(ref) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", repository) - if tag != "" && !options.All { - query.Set("tag", tag) - } - - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/engine-api/client/image_pull_test.go b/vendor/github.com/docker/engine-api/client/image_pull_test.go deleted file mode 100644 index 15063ae4fbf..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_pull_test.go +++ /dev/null @@ -1,199 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" -) - -func TestImagePullReferenceParseError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - return nil, nil - }), - } - // An empty reference is an invalid reference - _, err := client.ImagePull(context.Background(), "", types.ImagePullOptions{}) - if err == nil || err.Error() != "repository name must have at least one component" { - t.Fatalf("expected an error, got %v", err) - } -} - -func TestImagePullAnyError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestImagePullStatusUnauthorizedError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusUnauthorized, "Unauthorized error")), - } - _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{}) - if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { - t.Fatalf("expected a Unauthorized Error, got %v", err) - } -} - -func TestImagePullWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusUnauthorized, "Unauthorized error")), - } - privilegeFunc := func() (string, error) { - return "", fmt.Errorf("Error requesting privilege") - } - _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ - PrivilegeFunc: privilegeFunc, - }) - if err == nil || err.Error() != "Error requesting privilege" { - t.Fatalf("expected an error requesting privilege, got %v", err) - } -} - -func TestImagePullWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusUnauthorized, "Unauthorized error")), - } - privilegeFunc := func() (string, error) { - return "a-auth-header", nil - 
} - _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ - PrivilegeFunc: privilegeFunc, - }) - if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { - t.Fatalf("expected a Unauthorized Error, got %v", err) - } -} - -func TestImagePullWithPrivilegedFuncNoError(t *testing.T) { - expectedURL := "/images/create" - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - auth := req.Header.Get("X-Registry-Auth") - if auth == "NotValid" { - return &http.Response{ - StatusCode: http.StatusUnauthorized, - Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), - }, nil - } - if auth != "IAmValid" { - return nil, fmt.Errorf("Invalid auth header : expected %s, got %s", "IAmValid", auth) - } - query := req.URL.Query() - fromImage := query.Get("fromImage") - if fromImage != "myimage" { - return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", "myimage", fromImage) - } - tag := query.Get("tag") - if tag != "latest" { - return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", "latest", tag) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), - }, nil - }), - } - privilegeFunc := func() (string, error) { - return "IAmValid", nil - } - resp, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ - RegistryAuth: "NotValid", - PrivilegeFunc: privilegeFunc, - }) - if err != nil { - t.Fatal(err) - } - body, err := ioutil.ReadAll(resp) - if err != nil { - t.Fatal(err) - } - if string(body) != "hello world" { - t.Fatalf("expected 'hello world', got %s", string(body)) - } -} - -func TestImagePullWithoutErrors(t *testing.T) { - expectedURL := "/images/create" - expectedOutput := "hello world" - pullCases := []struct { - all bool - reference string - expectedImage string - expectedTag string - }{ - { - all: false, - reference: "myimage", - expectedImage: "myimage", - expectedTag: "latest", - }, - { - all: false, - reference: "myimage:tag", - expectedImage: "myimage", - expectedTag: "tag", - }, - { - all: true, - reference: "myimage", - expectedImage: "myimage", - expectedTag: "", - }, - { - all: true, - reference: "myimage:anything", - expectedImage: "myimage", - expectedTag: "", - }, - } - for _, pullCase := range pullCases { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - fromImage := query.Get("fromImage") - if fromImage != pullCase.expectedImage { - return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", pullCase.expectedImage, fromImage) - } - tag := query.Get("tag") - if tag != pullCase.expectedTag { - return nil, fmt.Errorf("tag not set in URL query properly. 
Expected '%s', got %s", pullCase.expectedTag, tag) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), - }, nil - }), - } - resp, err := client.ImagePull(context.Background(), pullCase.reference, types.ImagePullOptions{ - All: pullCase.all, - }) - if err != nil { - t.Fatal(err) - } - body, err := ioutil.ReadAll(resp) - if err != nil { - t.Fatal(err) - } - if string(body) != expectedOutput { - t.Fatalf("expected '%s', got %s", expectedOutput, string(body)) - } - } -} diff --git a/vendor/github.com/docker/engine-api/client/image_push.go b/vendor/github.com/docker/engine-api/client/image_push.go deleted file mode 100644 index 9c837a76d12..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_push.go +++ /dev/null @@ -1,54 +0,0 @@ -package client - -import ( - "errors" - "io" - "net/http" - "net/url" - - "golang.org/x/net/context" - - distreference "github.com/docker/distribution/reference" - "github.com/docker/engine-api/types" -) - -// ImagePush requests the docker host to push an image to a remote registry. -// It executes the privileged function if the operation is unauthorized -// and it tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly. -func (cli *Client) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) { - distributionRef, err := distreference.ParseNamed(ref) - if err != nil { - return nil, err - } - - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { - return nil, errors.New("cannot push a digest reference") - } - - var tag = "" - if nameTaggedRef, isNamedTagged := distributionRef.(distreference.NamedTagged); isNamedTagged { - tag = nameTaggedRef.Tag() - } - - query := url.Values{} - query.Set("tag", tag) - - resp, err := cli.tryImagePush(ctx, distributionRef.Name(), query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImagePush(ctx, distributionRef.Name(), query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (*serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) -} diff --git a/vendor/github.com/docker/engine-api/client/image_remove.go b/vendor/github.com/docker/engine-api/client/image_remove.go deleted file mode 100644 index 47224326e0c..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_remove.go +++ /dev/null @@ -1,31 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ImageRemove removes an image from the docker host. 
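For the ImageRemove call removed above, a brief sketch showing a forced removal with child pruning, assuming NewEnvClient; the image reference is hypothetical, and Untagged/Deleted are the response fields the accompanying test checks.

package main

import (
	"fmt"
	"log"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient() // assumed constructor, not part of this diff
	if err != nil {
		log.Fatal(err)
	}

	// Force-remove the image and prune its untagged parents in one call.
	deleted, err := cli.ImageRemove(context.Background(), "myrepo:old", types.ImageRemoveOptions{
		Force:         true,
		PruneChildren: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range deleted {
		if d.Untagged != "" {
			fmt.Println("untagged:", d.Untagged)
		}
		if d.Deleted != "" {
			fmt.Println("deleted:", d.Deleted)
		}
	}
}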
-func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) { - query := url.Values{} - - if options.Force { - query.Set("force", "1") - } - if !options.PruneChildren { - query.Set("noprune", "1") - } - - resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) - if err != nil { - return nil, err - } - - var dels []types.ImageDelete - err = json.NewDecoder(resp.body).Decode(&dels) - ensureReaderClosed(resp) - return dels, err -} diff --git a/vendor/github.com/docker/engine-api/client/image_remove_test.go b/vendor/github.com/docker/engine-api/client/image_remove_test.go deleted file mode 100644 index 6dbe87f1d0b..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_remove_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func TestImageRemoveError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestImageRemove(t *testing.T) { - expectedURL := "/images/image_id" - removeCases := []struct { - force bool - pruneChildren bool - expectedQueryParams map[string]string - }{ - { - force: false, - pruneChildren: false, - expectedQueryParams: map[string]string{ - "force": "", - "noprune": "1", - }, - }, { - force: true, - pruneChildren: true, - expectedQueryParams: map[string]string{ - "force": "1", - "noprune": "", - }, - }, - } - for _, removeCase := range removeCases { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "DELETE" { - return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) - } - query := req.URL.Query() - for key, expected := range removeCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) - } - } - b, err := json.Marshal([]types.ImageDelete{ - { - Untagged: "image_id1", - }, - { - Deleted: "image_id", - }, - }) - if err != nil { - return nil, err - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - imageDeletes, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{ - Force: removeCase.force, - PruneChildren: removeCase.pruneChildren, - }) - if err != nil { - t.Fatal(err) - } - if len(imageDeletes) != 2 { - t.Fatalf("expected 2 deleted images, got %v", imageDeletes) - } - } -} diff --git a/vendor/github.com/docker/engine-api/client/image_save.go b/vendor/github.com/docker/engine-api/client/image_save.go deleted file mode 100644 index ecac880a32c..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_save.go +++ /dev/null @@ -1,22 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "golang.org/x/net/context" -) - -// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. 
-// It's up to the caller to store the images and close the stream. -func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { - query := url.Values{ - "names": imageIDs, - } - - resp, err := cli.get(ctx, "/images/get", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/engine-api/client/image_save_test.go b/vendor/github.com/docker/engine-api/client/image_save_test.go deleted file mode 100644 index 8ee40c43ae8..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_save_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "reflect" - "testing" - - "golang.org/x/net/context" - - "strings" -) - -func TestImageSaveError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - _, err := client.ImageSave(context.Background(), []string{"nothing"}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server error, got %v", err) - } -} - -func TestImageSave(t *testing.T) { - expectedURL := "/images/get" - client := &Client{ - transport: newMockClient(nil, func(r *http.Request) (*http.Response, error) { - if !strings.HasPrefix(r.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) - } - query := r.URL.Query() - names := query["names"] - expectedNames := []string{"image_id1", "image_id2"} - if !reflect.DeepEqual(names, expectedNames) { - return nil, fmt.Errorf("names not set in URL query properly. Expected %v, got %v", names, expectedNames) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), - }, nil - }), - } - saveResponse, err := client.ImageSave(context.Background(), []string{"image_id1", "image_id2"}) - if err != nil { - t.Fatal(err) - } - response, err := ioutil.ReadAll(saveResponse) - if err != nil { - t.Fatal(err) - } - saveResponse.Close() - if string(response) != "response" { - t.Fatalf("expected response to contain 'response', got %s", string(response)) - } -} diff --git a/vendor/github.com/docker/engine-api/client/image_search.go b/vendor/github.com/docker/engine-api/client/image_search.go deleted file mode 100644 index 571ba3df365..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_search.go +++ /dev/null @@ -1,49 +0,0 @@ -package client - -import ( - "encoding/json" - "net/http" - "net/url" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/registry" - "golang.org/x/net/context" -) - -// ImageSearch makes the docker host to search by a term in a remote registry. -// The list of results is not sorted in any fashion. 
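A hedged sketch of the ImageSearch call described above, assuming NewEnvClient and the Name/Description fields of registry.SearchResult (neither appears in this diff); the search term is illustrative and no registry auth or PrivilegeFunc is supplied, so this only covers the anonymous, non-401 path.

package main

import (
	"fmt"
	"log"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient() // assumed constructor, not part of this diff
	if err != nil {
		log.Fatal(err)
	}

	// Anonymous search; as the comment above notes, results are not sorted.
	results, err := cli.ImageSearch(context.Background(), "nfs", types.ImageSearchOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range results {
		fmt.Printf("%s\t%s\n", r.Name, r.Description)
	}
}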
-func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { - var results []registry.SearchResult - query := url.Values{} - query.Set("term", term) - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) - if err != nil { - return results, err - } - query.Set("filters", filterJSON) - } - - resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return results, privilegeErr - } - resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) - } - if err != nil { - return results, err - } - - err = json.NewDecoder(resp.body).Decode(&results) - ensureReaderClosed(resp) - return results, err -} - -func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.get(ctx, "/images/search", query, headers) -} diff --git a/vendor/github.com/docker/engine-api/client/image_tag.go b/vendor/github.com/docker/engine-api/client/image_tag.go deleted file mode 100644 index 490de4e5fea..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_tag.go +++ /dev/null @@ -1,38 +0,0 @@ -package client - -import ( - "errors" - "fmt" - "net/url" - - "golang.org/x/net/context" - - distreference "github.com/docker/distribution/reference" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/reference" -) - -// ImageTag tags an image in the docker host -func (cli *Client) ImageTag(ctx context.Context, imageID, ref string, options types.ImageTagOptions) error { - distributionRef, err := distreference.ParseNamed(ref) - if err != nil { - return fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", ref) - } - - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { - return errors.New("refusing to create a tag with a digest reference") - } - - tag := reference.GetTagFromNamedRef(distributionRef) - - query := url.Values{} - query.Set("repo", distributionRef.Name()) - query.Set("tag", tag) - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.post(ctx, "/images/"+imageID+"/tag", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/image_tag_test.go b/vendor/github.com/docker/engine-api/client/image_tag_test.go deleted file mode 100644 index 803763cb10d..00000000000 --- a/vendor/github.com/docker/engine-api/client/image_tag_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" -) - -func TestImageTagError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.ImageTag(context.Background(), "image_id", "repo:tag", types.ImageTagOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -// Note: this is not testing all the InvalidReference as it's the reponsability -// of distribution/reference package. 
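To show the ImageTag call removed above in use, a minimal sketch assuming NewEnvClient; the source image and the registry-qualified target reference are hypothetical, and Force matches the query parameter the tag tests below verify.

package main

import (
	"log"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient() // assumed constructor, not part of this diff
	if err != nil {
		log.Fatal(err)
	}

	// Re-tag an existing image under a registry-qualified name before pushing it.
	err = cli.ImageTag(context.Background(), "myimage:latest",
		"registry.example.com:5000/team/myimage:v1",
		types.ImageTagOptions{Force: true})
	if err != nil {
		log.Fatal(err)
	}
}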
-func TestImageTagInvalidReference(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.ImageTag(context.Background(), "image_id", "aa/asdf$$^/aa", types.ImageTagOptions{}) - if err == nil || err.Error() != `Error parsing reference: "aa/asdf$$^/aa" is not a valid repository/tag` { - t.Fatalf("expected ErrReferenceInvalidFormat, got %v", err) - } -} - -func TestImageTag(t *testing.T) { - expectedURL := "/images/image_id/tag" - tagCases := []struct { - force bool - reference string - expectedQueryParams map[string]string - }{ - { - force: false, - reference: "repository:tag1", - expectedQueryParams: map[string]string{ - "force": "", - "repo": "repository", - "tag": "tag1", - }, - }, { - force: true, - reference: "another_repository:latest", - expectedQueryParams: map[string]string{ - "force": "1", - "repo": "another_repository", - "tag": "latest", - }, - }, { - force: true, - reference: "another_repository", - expectedQueryParams: map[string]string{ - "force": "1", - "repo": "another_repository", - "tag": "latest", - }, - }, { - force: true, - reference: "test/another_repository", - expectedQueryParams: map[string]string{ - "force": "1", - "repo": "test/another_repository", - "tag": "latest", - }, - }, { - force: true, - reference: "test/another_repository:tag1", - expectedQueryParams: map[string]string{ - "force": "1", - "repo": "test/another_repository", - "tag": "tag1", - }, - }, { - force: true, - reference: "test/test/another_repository:tag1", - expectedQueryParams: map[string]string{ - "force": "1", - "repo": "test/test/another_repository", - "tag": "tag1", - }, - }, { - force: true, - reference: "test:5000/test/another_repository:tag1", - expectedQueryParams: map[string]string{ - "force": "1", - "repo": "test:5000/test/another_repository", - "tag": "tag1", - }, - }, { - force: true, - reference: "test:5000/test/another_repository", - expectedQueryParams: map[string]string{ - "force": "1", - "repo": "test:5000/test/another_repository", - "tag": "latest", - }, - }, - } - for _, tagCase := range tagCases { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - query := req.URL.Query() - for key, expected := range tagCase.expectedQueryParams { - actual := query.Get(key) - if actual != expected { - return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) - } - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - err := client.ImageTag(context.Background(), "image_id", tagCase.reference, types.ImageTagOptions{ - Force: tagCase.force, - }) - if err != nil { - t.Fatal(err) - } - } -} diff --git a/vendor/github.com/docker/engine-api/client/info.go b/vendor/github.com/docker/engine-api/client/info.go deleted file mode 100644 index ff0958d65ce..00000000000 --- a/vendor/github.com/docker/engine-api/client/info.go +++ /dev/null @@ -1,26 +0,0 @@ -package client - -import ( - "encoding/json" - "fmt" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// Info returns information about the docker server. 
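A small sketch of the Info call described above, assuming NewEnvClient; it prints only the ID and Containers fields, the same ones the accompanying test asserts on.

package main

import (
	"fmt"
	"log"

	"github.com/docker/engine-api/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient() // assumed constructor, not part of this diff
	if err != nil {
		log.Fatal(err)
	}

	info, err := cli.Info(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("daemon %s is running %d containers\n", info.ID, info.Containers)
}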
-func (cli *Client) Info(ctx context.Context) (types.Info, error) { - var info types.Info - serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) - if err != nil { - return info, err - } - defer ensureReaderClosed(serverResp) - - if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { - return info, fmt.Errorf("Error reading remote info: %v", err) - } - - return info, nil -} diff --git a/vendor/github.com/docker/engine-api/client/info_test.go b/vendor/github.com/docker/engine-api/client/info_test.go deleted file mode 100644 index 56110c1ab75..00000000000 --- a/vendor/github.com/docker/engine-api/client/info_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - "testing" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func infoMock(req *http.Request) (*http.Response, error) { - info := &types.Info{ - ID: "daemonID", - Containers: 3, - } - b, err := json.Marshal(info) - if err != nil { - return nil, err - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil -} - -func TestInfo(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, infoMock), - } - - info, err := client.Info(context.Background()) - if err != nil { - t.Fatal(err) - } - - if info.ID != "daemonID" { - t.Fatalf("expected daemonID, got %s", info.ID) - } - - if info.Containers != 3 { - t.Fatalf("expected 3 containers, got %d", info.Containers) - } -} diff --git a/vendor/github.com/docker/engine-api/client/interface.go b/vendor/github.com/docker/engine-api/client/interface.go deleted file mode 100644 index 2c6872f534b..00000000000 --- a/vendor/github.com/docker/engine-api/client/interface.go +++ /dev/null @@ -1,79 +0,0 @@ -package client - -import ( - "io" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/network" - "github.com/docker/engine-api/types/registry" -) - -// APIClient is an interface that clients that talk with a docker server must implement. 
-type APIClient interface { - ClientVersion() string - ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) - ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) - ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) - ContainerDiff(ctx context.Context, container string) ([]types.ContainerChange, error) - ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) - ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error) - ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) - ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error - ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error - ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) - ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) - ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) - ContainerKill(ctx context.Context, container, signal string) error - ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) - ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) - ContainerPause(ctx context.Context, container string) error - ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error - ContainerRename(ctx context.Context, container, newContainerName string) error - ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error - ContainerRestart(ctx context.Context, container string, timeout int) error - ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) - ContainerStats(ctx context.Context, container string, stream bool) (io.ReadCloser, error) - ContainerStart(ctx context.Context, container string) error - ContainerStop(ctx context.Context, container string, timeout int) error - ContainerTop(ctx context.Context, container string, arguments []string) (types.ContainerProcessList, error) - ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) error - ContainerWait(ctx context.Context, container string) (int, error) - CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) - CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error - Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error) - ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) - ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) - ImageHistory(ctx context.Context, image string) ([]types.ImageHistory, error) - ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) - 
ImageInspectWithRaw(ctx context.Context, image string, getSize bool) (types.ImageInspect, []byte, error) - ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error) - ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) - ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) - ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) - ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) - ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) - ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) - ImageTag(ctx context.Context, image, ref string, options types.ImageTagOptions) error - Info(ctx context.Context) (types.Info, error) - NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error - NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) - NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error - NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) - NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) - NetworkRemove(ctx context.Context, networkID string) error - RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) - ServerVersion(ctx context.Context) (types.Version, error) - UpdateClientVersion(v string) - VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) - VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) - VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) - VolumeRemove(ctx context.Context, volumeID string) error -} - -// Ensure that Client always implements APIClient. -var _ APIClient = &Client{} diff --git a/vendor/github.com/docker/engine-api/client/login.go b/vendor/github.com/docker/engine-api/client/login.go deleted file mode 100644 index 482f94789f0..00000000000 --- a/vendor/github.com/docker/engine-api/client/login.go +++ /dev/null @@ -1,28 +0,0 @@ -package client - -import ( - "encoding/json" - "net/http" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// RegistryLogin authenticates the docker server with a given docker registry. -// It returns UnauthorizerError when the authentication fails. 
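A hedged sketch of the RegistryLogin call described above, assuming NewEnvClient; the Username, Password, and ServerAddress fields of types.AuthConfig and the Status field of types.AuthResponse are not shown in this diff and are assumptions, and the credentials and registry address are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient() // assumed constructor, not part of this diff
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder credentials; an unauthorized response surfaces as an error.
	resp, err := cli.RegistryLogin(context.Background(), types.AuthConfig{
		Username:      "someuser",
		Password:      "somepassword",
		ServerAddress: "registry.example.com:5000",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("login status:", resp.Status)
}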
-func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) { - resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) - - if resp != nil && resp.statusCode == http.StatusUnauthorized { - return types.AuthResponse{}, unauthorizedError{err} - } - if err != nil { - return types.AuthResponse{}, err - } - - var response types.AuthResponse - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/engine-api/client/network_connect.go b/vendor/github.com/docker/engine-api/client/network_connect.go deleted file mode 100644 index 9a402a3e638..00000000000 --- a/vendor/github.com/docker/engine-api/client/network_connect.go +++ /dev/null @@ -1,18 +0,0 @@ -package client - -import ( - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/network" - "golang.org/x/net/context" -) - -// NetworkConnect connects a container to an existent network in the docker host. -func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { - nc := types.NetworkConnect{ - Container: containerID, - EndpointConfig: config, - } - resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/network_connect_test.go b/vendor/github.com/docker/engine-api/client/network_connect_test.go deleted file mode 100644 index 92967673206..00000000000 --- a/vendor/github.com/docker/engine-api/client/network_connect_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/network" -) - -func TestNetworkConnectError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.NetworkConnect(context.Background(), "network_id", "container_id", nil) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestNetworkConnectEmptyNilEndpointSettings(t *testing.T) { - expectedURL := "/networks/network_id/connect" - - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - - var connect types.NetworkConnect - if err := json.NewDecoder(req.Body).Decode(&connect); err != nil { - return nil, err - } - - if connect.Container != "container_id" { - return nil, fmt.Errorf("expected 'container_id', got %s", connect.Container) - } - - if connect.EndpointConfig != nil { - return nil, fmt.Errorf("expected connect.EndpointConfig to be nil, got %v", connect.EndpointConfig) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.NetworkConnect(context.Background(), "network_id", "container_id", nil) - if err != nil { - t.Fatal(err) - } -} - -func TestNetworkConnect(t *testing.T) { - expectedURL := "/networks/network_id/connect" - - client := &Client{ - transport: 
newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - - var connect types.NetworkConnect - if err := json.NewDecoder(req.Body).Decode(&connect); err != nil { - return nil, err - } - - if connect.Container != "container_id" { - return nil, fmt.Errorf("expected 'container_id', got %s", connect.Container) - } - - if connect.EndpointConfig.NetworkID != "NetworkID" { - return nil, fmt.Errorf("expected 'NetworkID', got %s", connect.EndpointConfig.NetworkID) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.NetworkConnect(context.Background(), "network_id", "container_id", &network.EndpointSettings{ - NetworkID: "NetworkID", - }) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/engine-api/client/network_create.go b/vendor/github.com/docker/engine-api/client/network_create.go deleted file mode 100644 index c9c0b9fde77..00000000000 --- a/vendor/github.com/docker/engine-api/client/network_create.go +++ /dev/null @@ -1,25 +0,0 @@ -package client - -import ( - "encoding/json" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// NetworkCreate creates a new network in the docker host. -func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { - networkCreateRequest := types.NetworkCreateRequest{ - NetworkCreate: options, - Name: name, - } - var response types.NetworkCreateResponse - serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) - if err != nil { - return response, err - } - - json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} diff --git a/vendor/github.com/docker/engine-api/client/network_create_test.go b/vendor/github.com/docker/engine-api/client/network_create_test.go deleted file mode 100644 index 979c127ff32..00000000000 --- a/vendor/github.com/docker/engine-api/client/network_create_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func TestNetworkCreateError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.NetworkCreate(context.Background(), "mynetwork", types.NetworkCreate{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestNetworkCreate(t *testing.T) { - expectedURL := "/networks/create" - - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - - content, err := json.Marshal(types.NetworkCreateResponse{ - ID: "network_id", - Warning: "warning", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: 
ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - networkResponse, err := client.NetworkCreate(context.Background(), "mynetwork", types.NetworkCreate{ - CheckDuplicate: true, - Driver: "mydriver", - EnableIPv6: true, - Internal: true, - Options: map[string]string{ - "opt-key": "opt-value", - }, - }) - if err != nil { - t.Fatal(err) - } - if networkResponse.ID != "network_id" { - t.Fatalf("expected networkResponse.ID to be 'network_id', got %s", networkResponse.ID) - } - if networkResponse.Warning != "warning" { - t.Fatalf("expected networkResponse.Warning to be 'warning', got %s", networkResponse.Warning) - } -} diff --git a/vendor/github.com/docker/engine-api/client/network_disconnect.go b/vendor/github.com/docker/engine-api/client/network_disconnect.go deleted file mode 100644 index a3e33672fef..00000000000 --- a/vendor/github.com/docker/engine-api/client/network_disconnect.go +++ /dev/null @@ -1,14 +0,0 @@ -package client - -import ( - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// NetworkDisconnect disconnects a container from an existent network in the docker host. -func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { - nd := types.NetworkDisconnect{Container: containerID, Force: force} - resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/network_disconnect_test.go b/vendor/github.com/docker/engine-api/client/network_disconnect_test.go deleted file mode 100644 index a00436c4c4e..00000000000 --- a/vendor/github.com/docker/engine-api/client/network_disconnect_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func TestNetworkDisconnectError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.NetworkDisconnect(context.Background(), "network_id", "container_id", false) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestNetworkDisconnect(t *testing.T) { - expectedURL := "/networks/network_id/disconnect" - - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - - var disconnect types.NetworkDisconnect - if err := json.NewDecoder(req.Body).Decode(&disconnect); err != nil { - return nil, err - } - - if disconnect.Container != "container_id" { - return nil, fmt.Errorf("expected 'container_id', got %s", disconnect.Container) - } - - if !disconnect.Force { - return nil, fmt.Errorf("expected Force to be true, got %v", disconnect.Force) - } - - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), - }, nil - }), - } - - err := client.NetworkDisconnect(context.Background(), "network_id", "container_id", true) - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/engine-api/client/network_inspect.go 
b/vendor/github.com/docker/engine-api/client/network_inspect.go deleted file mode 100644 index 4f81e5ce40f..00000000000 --- a/vendor/github.com/docker/engine-api/client/network_inspect.go +++ /dev/null @@ -1,24 +0,0 @@ -package client - -import ( - "encoding/json" - "net/http" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// NetworkInspect returns the information for a specific network configured in the docker host. -func (cli *Client) NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) { - var networkResource types.NetworkResource - resp, err := cli.get(ctx, "/networks/"+networkID, nil, nil) - if err != nil { - if resp.statusCode == http.StatusNotFound { - return networkResource, networkNotFoundError{networkID} - } - return networkResource, err - } - err = json.NewDecoder(resp.body).Decode(&networkResource) - ensureReaderClosed(resp) - return networkResource, err -} diff --git a/vendor/github.com/docker/engine-api/client/network_inspect_test.go b/vendor/github.com/docker/engine-api/client/network_inspect_test.go deleted file mode 100644 index e663b16c42a..00000000000 --- a/vendor/github.com/docker/engine-api/client/network_inspect_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func TestNetworkInspectError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.NetworkInspect(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestNetworkInspectContainerNotFound(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusNotFound, "Server error")), - } - - _, err := client.NetworkInspect(context.Background(), "unknown") - if err == nil || !IsErrNetworkNotFound(err) { - t.Fatalf("expected a containerNotFound error, got %v", err) - } -} - -func TestNetworkInspect(t *testing.T) { - expectedURL := "/networks/network_id" - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "GET" { - return nil, fmt.Errorf("expected GET method, got %s", req.Method) - } - - content, err := json.Marshal(types.NetworkResource{ - Name: "mynetwork", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - r, err := client.NetworkInspect(context.Background(), "network_id") - if err != nil { - t.Fatal(err) - } - if r.Name != "mynetwork" { - t.Fatalf("expected `mynetwork`, got %s", r.Name) - } -} diff --git a/vendor/github.com/docker/engine-api/client/network_list.go b/vendor/github.com/docker/engine-api/client/network_list.go deleted file mode 100644 index 813109c1802..00000000000 --- a/vendor/github.com/docker/engine-api/client/network_list.go +++ /dev/null @@ -1,31 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "golang.org/x/net/context" -) - -// NetworkList returns the list of networks configured in the 
docker host. -func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { - query := url.Values{} - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - var networkResources []types.NetworkResource - resp, err := cli.get(ctx, "/networks", query, nil) - if err != nil { - return networkResources, err - } - err = json.NewDecoder(resp.body).Decode(&networkResources) - ensureReaderClosed(resp) - return networkResources, err -} diff --git a/vendor/github.com/docker/engine-api/client/network_list_test.go b/vendor/github.com/docker/engine-api/client/network_list_test.go deleted file mode 100644 index d1c7f42fc81..00000000000 --- a/vendor/github.com/docker/engine-api/client/network_list_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "golang.org/x/net/context" -) - -func TestNetworkListError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.NetworkList(context.Background(), types.NetworkListOptions{ - Filters: filters.NewArgs(), - }) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestNetworkList(t *testing.T) { - expectedURL := "/networks" - - noDanglingFilters := filters.NewArgs() - noDanglingFilters.Add("dangling", "false") - - danglingFilters := filters.NewArgs() - danglingFilters.Add("dangling", "true") - - labelFilters := filters.NewArgs() - labelFilters.Add("label", "label1") - labelFilters.Add("label", "label2") - - listCases := []struct { - options types.NetworkListOptions - expectedFilters string - }{ - { - options: types.NetworkListOptions{ - Filters: filters.NewArgs(), - }, - expectedFilters: "", - }, { - options: types.NetworkListOptions{ - Filters: noDanglingFilters, - }, - expectedFilters: `{"dangling":{"false":true}}`, - }, { - options: types.NetworkListOptions{ - Filters: danglingFilters, - }, - expectedFilters: `{"dangling":{"true":true}}`, - }, { - options: types.NetworkListOptions{ - Filters: labelFilters, - }, - expectedFilters: `{"label":{"label1":true,"label2":true}}`, - }, - } - - for _, listCase := range listCases { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "GET" { - return nil, fmt.Errorf("expected GET method, got %s", req.Method) - } - query := req.URL.Query() - actualFilters := query.Get("filters") - if actualFilters != listCase.expectedFilters { - return nil, fmt.Errorf("filters not set in URL query properly. 
Expected '%s', got %s", listCase.expectedFilters, actualFilters) - } - content, err := json.Marshal([]types.NetworkResource{ - { - Name: "network", - Driver: "bridge", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - networkResources, err := client.NetworkList(context.Background(), listCase.options) - if err != nil { - t.Fatal(err) - } - if len(networkResources) != 1 { - t.Fatalf("expected 1 network resource, got %v", networkResources) - } - } -} diff --git a/vendor/github.com/docker/engine-api/client/network_remove.go b/vendor/github.com/docker/engine-api/client/network_remove.go deleted file mode 100644 index 6bd67489242..00000000000 --- a/vendor/github.com/docker/engine-api/client/network_remove.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "golang.org/x/net/context" - -// NetworkRemove removes an existent network from the docker host. -func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { - resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/network_remove_test.go b/vendor/github.com/docker/engine-api/client/network_remove_test.go deleted file mode 100644 index d8cfa0ed6e0..00000000000 --- a/vendor/github.com/docker/engine-api/client/network_remove_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestNetworkRemoveError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.NetworkRemove(context.Background(), "network_id") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestNetworkRemove(t *testing.T) { - expectedURL := "/networks/network_id" - - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "DELETE" { - return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), - }, nil - }), - } - - err := client.NetworkRemove(context.Background(), "network_id") - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/engine-api/client/request.go b/vendor/github.com/docker/engine-api/client/request.go deleted file mode 100644 index cdbb0975bd2..00000000000 --- a/vendor/github.com/docker/engine-api/client/request.go +++ /dev/null @@ -1,185 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/docker/engine-api/client/transport/cancellable" - "golang.org/x/net/context" -) - -// serverResponse is a wrapper for http API responses. -type serverResponse struct { - body io.ReadCloser - header http.Header - statusCode int -} - -// head sends an http request to the docker API using the method HEAD. 
-func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) { - return cli.sendRequest(ctx, "HEAD", path, query, nil, headers) -} - -// getWithContext sends an http request to the docker API using the method GET with a specific go context. -func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) { - return cli.sendRequest(ctx, "GET", path, query, nil, headers) -} - -// postWithContext sends an http request to the docker API using the method POST with a specific go context. -func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) { - return cli.sendRequest(ctx, "POST", path, query, obj, headers) -} - -func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) { - return cli.sendClientRequest(ctx, "POST", path, query, body, headers) -} - -// put sends an http request to the docker API using the method PUT. -func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) { - return cli.sendRequest(ctx, "PUT", path, query, obj, headers) -} - -// put sends an http request to the docker API using the method PUT. -func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) { - return cli.sendClientRequest(ctx, "PUT", path, query, body, headers) -} - -// delete sends an http request to the docker API using the method DELETE. -func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) { - return cli.sendRequest(ctx, "DELETE", path, query, nil, headers) -} - -func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) { - var body io.Reader - - if obj != nil { - var err error - body, err = encodeData(obj) - if err != nil { - return nil, err - } - if headers == nil { - headers = make(map[string][]string) - } - headers["Content-Type"] = []string{"application/json"} - } - - return cli.sendClientRequest(ctx, method, path, query, body, headers) -} - -func (cli *Client) sendClientRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) { - serverResp := &serverResponse{ - body: nil, - statusCode: -1, - } - - expectedPayload := (method == "POST" || method == "PUT") - if expectedPayload && body == nil { - body = bytes.NewReader([]byte{}) - } - - req, err := cli.newRequest(method, path, query, body, headers) - if cli.proto == "unix" || cli.proto == "npipe" { - // For local communications, it doesn't matter what the host is. We just - // need a valid and meaningful host name. 
(See #189) - req.Host = "docker" - } - req.URL.Host = cli.addr - req.URL.Scheme = cli.transport.Scheme() - - if expectedPayload && req.Header.Get("Content-Type") == "" { - req.Header.Set("Content-Type", "text/plain") - } - - resp, err := cancellable.Do(ctx, cli.transport, req) - if resp != nil { - serverResp.statusCode = resp.StatusCode - } - - if err != nil { - if isTimeout(err) || strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { - return serverResp, ErrConnectionFailed - } - - if !cli.transport.Secure() && strings.Contains(err.Error(), "malformed HTTP response") { - return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) - } - if cli.transport.Secure() && strings.Contains(err.Error(), "remote error: bad certificate") { - return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err) - } - - return serverResp, fmt.Errorf("An error occurred trying to connect: %v", err) - } - - if serverResp.statusCode < 200 || serverResp.statusCode >= 400 { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return serverResp, err - } - if len(body) == 0 { - return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL) - } - return serverResp, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body)) - } - - serverResp.body = resp.Body - serverResp.header = resp.Header - return serverResp, nil -} - -func (cli *Client) newRequest(method, path string, query url.Values, body io.Reader, headers map[string][]string) (*http.Request, error) { - apiPath := cli.getAPIPath(path, query) - req, err := http.NewRequest(method, apiPath, body) - if err != nil { - return nil, err - } - - // Add CLI Config's HTTP Headers BEFORE we set the Docker headers - // then the user can't change OUR headers - for k, v := range cli.customHTTPHeaders { - req.Header.Set(k, v) - } - - if headers != nil { - for k, v := range headers { - req.Header[k] = v - } - } - - return req, nil -} - -func encodeData(data interface{}) (*bytes.Buffer, error) { - params := bytes.NewBuffer(nil) - if data != nil { - if err := json.NewEncoder(params).Encode(data); err != nil { - return nil, err - } - } - return params, nil -} - -func ensureReaderClosed(response *serverResponse) { - if response != nil && response.body != nil { - response.body.Close() - } -} - -func isTimeout(err error) bool { - type timeout interface { - Timeout() bool - } - e := err - switch urlErr := err.(type) { - case *url.Error: - e = urlErr.Err - } - t, ok := e.(timeout) - return ok && t.Timeout() -} diff --git a/vendor/github.com/docker/engine-api/client/request_test.go b/vendor/github.com/docker/engine-api/client/request_test.go deleted file mode 100644 index 3edcfc68901..00000000000 --- a/vendor/github.com/docker/engine-api/client/request_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -// TestSetHostHeader should set fake host for local communications, set real host -// for normal communications. 
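// An illustrative sketch, not taken from the removed sources: the typed wrappers in this
// vendored client (NetworkInspect, VolumeInspect, VolumeList, and so on) all consume the
// request helpers above through the same three steps -- issue the request, decode the JSON
// body, release the reader. Assumes a *Client named cli, a context ctx, a volumeID string,
// and a types.Volume named volume:
//
//	resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil)
//	if err != nil {
//		return volume, err
//	}
//	err = json.NewDecoder(resp.body).Decode(&volume)
//	ensureReaderClosed(resp)
//	return volume, err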
-func TestSetHostHeader(t *testing.T) { - testURL := "/test" - testCases := []struct { - host string - expectedHost string - expectedURLHost string - }{ - { - "unix:///var/run/docker.sock", - "docker", - "/var/run/docker.sock", - }, - { - "npipe:////./pipe/docker_engine", - "docker", - "//./pipe/docker_engine", - }, - { - "tcp://0.0.0.0:4243", - "", - "0.0.0.0:4243", - }, - { - "tcp://localhost:4243", - "", - "localhost:4243", - }, - } - - for c, test := range testCases { - proto, addr, basePath, err := ParseHost(test.host) - if err != nil { - t.Fatal(err) - } - - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, testURL) { - return nil, fmt.Errorf("Test Case #%d: Expected URL %q, got %q", c, testURL, req.URL) - } - if req.Host != test.expectedHost { - return nil, fmt.Errorf("Test Case #%d: Expected host %q, got %q", c, test.expectedHost, req.Host) - } - if req.URL.Host != test.expectedURLHost { - return nil, fmt.Errorf("Test Case #%d: Expected URL host %q, got %q", c, test.expectedURLHost, req.URL.Host) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(([]byte("")))), - }, nil - }), - proto: proto, - addr: addr, - basePath: basePath, - } - - _, err = client.sendRequest(context.Background(), "GET", testURL, nil, nil, nil) - if err != nil { - t.Fatal(err) - } - } -} diff --git a/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler.go b/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler.go deleted file mode 100644 index 11dff60026c..00000000000 --- a/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.5 - -package cancellable - -import ( - "net/http" - - "github.com/docker/engine-api/client/transport" -) - -func canceler(client transport.Sender, req *http.Request) func() { - // TODO(djd): Respect any existing value of req.Cancel. - ch := make(chan struct{}) - req.Cancel = ch - - return func() { - close(ch) - } -} diff --git a/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go b/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go deleted file mode 100644 index 8ff2845c28e..00000000000 --- a/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.5 - -package cancellable - -import ( - "net/http" - - "github.com/docker/engine-api/client/transport" -) - -type requestCanceler interface { - CancelRequest(*http.Request) -} - -func canceler(client transport.Sender, req *http.Request) func() { - rc, ok := client.(requestCanceler) - if !ok { - return func() {} - } - return func() { - rc.CancelRequest(req) - } -} diff --git a/vendor/github.com/docker/engine-api/client/transport/cancellable/cancellable.go b/vendor/github.com/docker/engine-api/client/transport/cancellable/cancellable.go deleted file mode 100644 index 526feb0f456..00000000000 --- a/vendor/github.com/docker/engine-api/client/transport/cancellable/cancellable.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2015 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cancellable provides helper function to cancel http requests. -package cancellable - -import ( - "io" - "net/http" - - "github.com/docker/engine-api/client/transport" - - "golang.org/x/net/context" -) - -func nop() {} - -var ( - testHookContextDoneBeforeHeaders = nop - testHookDoReturned = nop - testHookDidBodyClose = nop -) - -// Do sends an HTTP request with the provided transport.Sender and returns an HTTP response. -// If the client is nil, http.DefaultClient is used. -// If the context is canceled or times out, ctx.Err() will be returned. -// -// FORK INFORMATION: -// -// This function deviates from the upstream version in golang.org/x/net/context/ctxhttp by -// taking a Sender interface rather than a *http.Client directly. That allow us to use -// this funcion with mocked clients and hijacked connections. -func Do(ctx context.Context, client transport.Sender, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - - // Request cancelation changed in Go 1.5, see canceler.go and canceler_go14.go. - cancel := canceler(client, req) - - type responseAndError struct { - resp *http.Response - err error - } - result := make(chan responseAndError, 1) - - go func() { - resp, err := client.Do(req) - testHookDoReturned() - result <- responseAndError{resp, err} - }() - - var resp *http.Response - - select { - case <-ctx.Done(): - testHookContextDoneBeforeHeaders() - cancel() - // Clean up after the goroutine calling client.Do: - go func() { - if r := <-result; r.resp != nil && r.resp.Body != nil { - testHookDidBodyClose() - r.resp.Body.Close() - } - }() - return nil, ctx.Err() - case r := <-result: - var err error - resp, err = r.resp, r.err - if err != nil { - return resp, err - } - } - - c := make(chan struct{}) - go func() { - select { - case <-ctx.Done(): - cancel() - case <-c: - // The response's Body is closed. - } - }() - resp.Body = ¬ifyingReader{resp.Body, c} - - return resp, nil -} - -// notifyingReader is an io.ReadCloser that closes the notify channel after -// Close is called or a Read fails on the underlying ReadCloser. -type notifyingReader struct { - io.ReadCloser - notify chan<- struct{} -} - -func (r *notifyingReader) Read(p []byte) (int, error) { - n, err := r.ReadCloser.Read(p) - if err != nil && r.notify != nil { - close(r.notify) - r.notify = nil - } - return n, err -} - -func (r *notifyingReader) Close() error { - err := r.ReadCloser.Close() - if r.notify != nil { - close(r.notify) - r.notify = nil - } - return err -} diff --git a/vendor/github.com/docker/engine-api/client/transport/client.go b/vendor/github.com/docker/engine-api/client/transport/client.go deleted file mode 100644 index 13d4b3ab3de..00000000000 --- a/vendor/github.com/docker/engine-api/client/transport/client.go +++ /dev/null @@ -1,47 +0,0 @@ -package transport - -import ( - "crypto/tls" - "net/http" -) - -// Sender is an interface that clients must implement -// to be able to send requests to a remote connection. -type Sender interface { - // Do sends request to a remote endpoint. - Do(*http.Request) (*http.Response, error) -} - -// Client is an interface that abstracts all remote connections. -type Client interface { - Sender - // Secure tells whether the connection is secure or not. - Secure() bool - // Scheme returns the connection protocol the client uses. 
- Scheme() string - // TLSConfig returns any TLS configuration the client uses. - TLSConfig() *tls.Config -} - -// tlsInfo returns information about the TLS configuration. -type tlsInfo struct { - tlsConfig *tls.Config -} - -// TLSConfig returns the TLS configuration. -func (t *tlsInfo) TLSConfig() *tls.Config { - return t.tlsConfig -} - -// Scheme returns protocol scheme to use. -func (t *tlsInfo) Scheme() string { - if t.tlsConfig != nil { - return "https" - } - return "http" -} - -// Secure returns true if there is a TLS configuration. -func (t *tlsInfo) Secure() bool { - return t.tlsConfig != nil -} diff --git a/vendor/github.com/docker/engine-api/client/transport/transport.go b/vendor/github.com/docker/engine-api/client/transport/transport.go deleted file mode 100644 index ff28af1855f..00000000000 --- a/vendor/github.com/docker/engine-api/client/transport/transport.go +++ /dev/null @@ -1,57 +0,0 @@ -// Package transport provides function to send request to remote endpoints. -package transport - -import ( - "fmt" - "net/http" - - "github.com/docker/go-connections/sockets" -) - -// apiTransport holds information about the http transport to connect with the API. -type apiTransport struct { - *http.Client - *tlsInfo - transport *http.Transport -} - -// NewTransportWithHTTP creates a new transport based on the provided proto, address and http client. -// It uses Docker's default http transport configuration if the client is nil. -// It does not modify the client's transport if it's not nil. -func NewTransportWithHTTP(proto, addr string, client *http.Client) (Client, error) { - var transport *http.Transport - - if client != nil { - tr, ok := client.Transport.(*http.Transport) - if !ok { - return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport) - } - transport = tr - } else { - transport = defaultTransport(proto, addr) - client = &http.Client{ - Transport: transport, - } - } - - return &apiTransport{ - Client: client, - tlsInfo: &tlsInfo{transport.TLSClientConfig}, - transport: transport, - }, nil -} - -// CancelRequest stops a request execution. -func (a *apiTransport) CancelRequest(req *http.Request) { - a.transport.CancelRequest(req) -} - -// defaultTransport creates a new http.Transport with Docker's -// default transport configuration. -func defaultTransport(proto, addr string) *http.Transport { - tr := new(http.Transport) - sockets.ConfigureTransport(tr, proto, addr) - return tr -} - -var _ Client = &apiTransport{} diff --git a/vendor/github.com/docker/engine-api/client/version.go b/vendor/github.com/docker/engine-api/client/version.go deleted file mode 100644 index e037551a21b..00000000000 --- a/vendor/github.com/docker/engine-api/client/version.go +++ /dev/null @@ -1,21 +0,0 @@ -package client - -import ( - "encoding/json" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ServerVersion returns information of the docker client and server host. 
-func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { - resp, err := cli.get(ctx, "/version", nil, nil) - if err != nil { - return types.Version{}, err - } - - var server types.Version - err = json.NewDecoder(resp.body).Decode(&server) - ensureReaderClosed(resp) - return server, err -} diff --git a/vendor/github.com/docker/engine-api/client/volume_create.go b/vendor/github.com/docker/engine-api/client/volume_create.go deleted file mode 100644 index cc1e1c17723..00000000000 --- a/vendor/github.com/docker/engine-api/client/volume_create.go +++ /dev/null @@ -1,20 +0,0 @@ -package client - -import ( - "encoding/json" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// VolumeCreate creates a volume in the docker host. -func (cli *Client) VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) { - var volume types.Volume - resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) - if err != nil { - return volume, err - } - err = json.NewDecoder(resp.body).Decode(&volume) - ensureReaderClosed(resp) - return volume, err -} diff --git a/vendor/github.com/docker/engine-api/client/volume_create_test.go b/vendor/github.com/docker/engine-api/client/volume_create_test.go deleted file mode 100644 index 7d82b8e4151..00000000000 --- a/vendor/github.com/docker/engine-api/client/volume_create_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func TestVolumeCreateError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.VolumeCreate(context.Background(), types.VolumeCreateRequest{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestVolumeCreate(t *testing.T) { - expectedURL := "/volumes/create" - - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - - if req.Method != "POST" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) - } - - content, err := json.Marshal(types.Volume{ - Name: "volume", - Driver: "local", - Mountpoint: "mountpoint", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - volume, err := client.VolumeCreate(context.Background(), types.VolumeCreateRequest{ - Name: "myvolume", - Driver: "mydriver", - DriverOpts: map[string]string{ - "opt-key": "opt-value", - }, - }) - if err != nil { - t.Fatal(err) - } - if volume.Name != "volume" { - t.Fatalf("expected volume.Name to be 'volume', got %s", volume.Name) - } - if volume.Driver != "local" { - t.Fatalf("expected volume.Driver to be 'local', got %s", volume.Driver) - } - if volume.Mountpoint != "mountpoint" { - t.Fatalf("expected volume.Mountpoint to be 'mountpoint', got %s", volume.Mountpoint) - } -} diff --git a/vendor/github.com/docker/engine-api/client/volume_inspect.go b/vendor/github.com/docker/engine-api/client/volume_inspect.go deleted file mode 100644 index 4bf4a7b084a..00000000000 --- a/vendor/github.com/docker/engine-api/client/volume_inspect.go +++ 
/dev/null @@ -1,24 +0,0 @@ -package client - -import ( - "encoding/json" - "net/http" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// VolumeInspect returns the information about a specific volume in the docker host. -func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { - var volume types.Volume - resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) - if err != nil { - if resp.statusCode == http.StatusNotFound { - return volume, volumeNotFoundError{volumeID} - } - return volume, err - } - err = json.NewDecoder(resp.body).Decode(&volume) - ensureReaderClosed(resp) - return volume, err -} diff --git a/vendor/github.com/docker/engine-api/client/volume_inspect_test.go b/vendor/github.com/docker/engine-api/client/volume_inspect_test.go deleted file mode 100644 index 09ddfd3ec42..00000000000 --- a/vendor/github.com/docker/engine-api/client/volume_inspect_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func TestVolumeInspectError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.VolumeInspect(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestVolumeInspectNotFound(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusNotFound, "Server error")), - } - - _, err := client.VolumeInspect(context.Background(), "unknown") - if err == nil || !IsErrVolumeNotFound(err) { - t.Fatalf("expected a volumeNotFound error, got %v", err) - } -} - -func TestVolumeInspect(t *testing.T) { - expectedURL := "/volumes/volume_id" - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "GET" { - return nil, fmt.Errorf("expected GET method, got %s", req.Method) - } - content, err := json.Marshal(types.Volume{ - Name: "name", - Driver: "driver", - Mountpoint: "mountpoint", - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - v, err := client.VolumeInspect(context.Background(), "volume_id") - if err != nil { - t.Fatal(err) - } - if v.Name != "name" { - t.Fatalf("expected `name`, got %s", v.Name) - } - if v.Driver != "driver" { - t.Fatalf("expected `driver`, got %s", v.Driver) - } - if v.Mountpoint != "mountpoint" { - t.Fatalf("expected `mountpoint`, got %s", v.Mountpoint) - } -} diff --git a/vendor/github.com/docker/engine-api/client/volume_list.go b/vendor/github.com/docker/engine-api/client/volume_list.go deleted file mode 100644 index bb4c40d5f98..00000000000 --- a/vendor/github.com/docker/engine-api/client/volume_list.go +++ /dev/null @@ -1,32 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "golang.org/x/net/context" -) - -// VolumeList returns the volumes configured in the docker host. 
-func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) { - var volumes types.VolumesListResponse - query := url.Values{} - - if filter.Len() > 0 { - filterJSON, err := filters.ToParam(filter) - if err != nil { - return volumes, err - } - query.Set("filters", filterJSON) - } - resp, err := cli.get(ctx, "/volumes", query, nil) - if err != nil { - return volumes, err - } - - err = json.NewDecoder(resp.body).Decode(&volumes) - ensureReaderClosed(resp) - return volumes, err -} diff --git a/vendor/github.com/docker/engine-api/client/volume_list_test.go b/vendor/github.com/docker/engine-api/client/volume_list_test.go deleted file mode 100644 index c06f4114ab5..00000000000 --- a/vendor/github.com/docker/engine-api/client/volume_list_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "golang.org/x/net/context" -) - -func TestVolumeListError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - - _, err := client.VolumeList(context.Background(), filters.NewArgs()) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestVolumeList(t *testing.T) { - expectedURL := "/volumes" - - noDanglingFilters := filters.NewArgs() - noDanglingFilters.Add("dangling", "false") - - danglingFilters := filters.NewArgs() - danglingFilters.Add("dangling", "true") - - labelFilters := filters.NewArgs() - labelFilters.Add("label", "label1") - labelFilters.Add("label", "label2") - - listCases := []struct { - filters filters.Args - expectedFilters string - }{ - { - filters: filters.NewArgs(), - expectedFilters: "", - }, { - filters: noDanglingFilters, - expectedFilters: `{"dangling":{"false":true}}`, - }, { - filters: danglingFilters, - expectedFilters: `{"dangling":{"true":true}}`, - }, { - filters: labelFilters, - expectedFilters: `{"label":{"label1":true,"label2":true}}`, - }, - } - - for _, listCase := range listCases { - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - query := req.URL.Query() - actualFilters := query.Get("filters") - if actualFilters != listCase.expectedFilters { - return nil, fmt.Errorf("filters not set in URL query properly. 
Expected '%s', got %s", listCase.expectedFilters, actualFilters) - } - content, err := json.Marshal(types.VolumesListResponse{ - Volumes: []*types.Volume{ - { - Name: "volume", - Driver: "local", - }, - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - - volumeResponse, err := client.VolumeList(context.Background(), listCase.filters) - if err != nil { - t.Fatal(err) - } - if len(volumeResponse.Volumes) != 1 { - t.Fatalf("expected 1 volume, got %v", volumeResponse.Volumes) - } - } -} diff --git a/vendor/github.com/docker/engine-api/client/volume_remove.go b/vendor/github.com/docker/engine-api/client/volume_remove.go deleted file mode 100644 index 0dce24c79b8..00000000000 --- a/vendor/github.com/docker/engine-api/client/volume_remove.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "golang.org/x/net/context" - -// VolumeRemove removes a volume from the docker host. -func (cli *Client) VolumeRemove(ctx context.Context, volumeID string) error { - resp, err := cli.delete(ctx, "/volumes/"+volumeID, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/volume_remove_test.go b/vendor/github.com/docker/engine-api/client/volume_remove_test.go deleted file mode 100644 index 45fc52aa465..00000000000 --- a/vendor/github.com/docker/engine-api/client/volume_remove_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "golang.org/x/net/context" -) - -func TestVolumeRemoveError(t *testing.T) { - client := &Client{ - transport: newMockClient(nil, errorMock(http.StatusInternalServerError, "Server error")), - } - - err := client.VolumeRemove(context.Background(), "volume_id") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } -} - -func TestVolumeRemove(t *testing.T) { - expectedURL := "/volumes/volume_id" - - client := &Client{ - transport: newMockClient(nil, func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - if req.Method != "DELETE" { - return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), - }, nil - }), - } - - err := client.VolumeRemove(context.Background(), "volume_id") - if err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/engine-api/doc.go b/vendor/github.com/docker/engine-api/doc.go deleted file mode 100644 index 8e2da06a91e..00000000000 --- a/vendor/github.com/docker/engine-api/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Package engineapi provides libraries to implement client and server components compatible with the Docker engine. - -The client package in github.com/docker/engine-api/client implements all necessary requests to implement the official Docker engine cli. - -Create a new client, then use it to send and receive messages to the Docker engine API: - - defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0"} - cli, err := client.NewClient("unix:///var/run/docker.sock", "v1.22", nil, defaultHeaders) - -Other programs, like Docker Machine, can set the default Docker engine environment for you. 
There is a shortcut to use its variables to configure the client: - - cli, err := client.NewEnvClient() - -All request arguments are defined as typed structures in the types package. For instance, this is how to get all containers running in the host: - - options := types.ContainerListOptions{All: true} - containers, err := cli.ContainerList(context.Background(), options) - -*/ -package engineapi diff --git a/vendor/github.com/docker/engine-api/types/container/config.go b/vendor/github.com/docker/engine-api/types/container/config.go deleted file mode 100644 index 1dfc4083480..00000000000 --- a/vendor/github.com/docker/engine-api/types/container/config.go +++ /dev/null @@ -1,37 +0,0 @@ -package container - -import ( - "github.com/docker/engine-api/types/strslice" - "github.com/docker/go-connections/nat" -) - -// Config contains the configuration data about a container. -// It should hold only portable information about the container. -// Here, "portable" means "independent from the host we are running on". -// Non-portable information *should* appear in HostConfig. -// All fields added to this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. -type Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. - Env []string // List of environment variable to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - Image string // Name of the image as it was passed by the operator (eg. could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container -} diff --git a/vendor/github.com/docker/engine-api/types/reference/image_reference.go b/vendor/github.com/docker/engine-api/types/reference/image_reference.go deleted file mode 100644 index be9cf8ebed0..00000000000 --- a/vendor/github.com/docker/engine-api/types/reference/image_reference.go +++ /dev/null @@ -1,34 +0,0 @@ -package reference - -import ( - distreference "github.com/docker/distribution/reference" -) - -// Parse parses the given references and returns the repository and -// tag (if present) from it. If there is an error during parsing, it will -// return an error. 
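// An illustrative sketch, not taken from the removed sources, of the name/tag pairs Parse
// is documented to return: "latest" is the fallback tag, and for a digest reference the
// digest string is returned as the tag (see the test table below):
//
//	name, tag, _ := Parse("test.com:5000/test/repository") // "test.com:5000/test/repository", "latest"
//	name, tag, _ = Parse("repository:tag")                  // "repository", "tag"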
-func Parse(ref string) (string, string, error) { - distributionRef, err := distreference.ParseNamed(ref) - if err != nil { - return "", "", err - } - - tag := GetTagFromNamedRef(distributionRef) - return distributionRef.Name(), tag, nil -} - -// GetTagFromNamedRef returns a tag from the specified reference. -// This function is necessary as long as the docker "server" api makes the distinction between repository -// and tags. -func GetTagFromNamedRef(ref distreference.Named) string { - var tag string - switch x := ref.(type) { - case distreference.Digested: - tag = x.Digest().String() - case distreference.NamedTagged: - tag = x.Tag() - default: - tag = "latest" - } - return tag -} diff --git a/vendor/github.com/docker/engine-api/types/reference/image_reference_test.go b/vendor/github.com/docker/engine-api/types/reference/image_reference_test.go deleted file mode 100644 index 61fb676b6c1..00000000000 --- a/vendor/github.com/docker/engine-api/types/reference/image_reference_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package reference - -import ( - "testing" -) - -func TestParse(t *testing.T) { - testCases := []struct { - ref string - expectedName string - expectedTag string - expectedError bool - }{ - { - ref: "", - expectedName: "", - expectedTag: "", - expectedError: true, - }, - { - ref: "repository", - expectedName: "repository", - expectedTag: "latest", - expectedError: false, - }, - { - ref: "repository:tag", - expectedName: "repository", - expectedTag: "tag", - expectedError: false, - }, - { - ref: "test.com/repository", - expectedName: "test.com/repository", - expectedTag: "latest", - expectedError: false, - }, - { - ref: "test.com:5000/test/repository", - expectedName: "test.com:5000/test/repository", - expectedTag: "latest", - expectedError: false, - }, - { - ref: "test.com:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - expectedName: "test.com:5000/repo", - expectedTag: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - expectedError: false, - }, - { - ref: "test.com:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - expectedName: "test.com:5000/repo", - expectedTag: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - expectedError: false, - }, - } - - for _, c := range testCases { - name, tag, err := Parse(c.ref) - if err != nil && c.expectedError { - continue - } else if err != nil { - t.Fatalf("error with %s: %s", c.ref, err.Error()) - } - if name != c.expectedName { - t.Fatalf("expected name %s, got %s", c.expectedName, name) - } - if tag != c.expectedTag { - t.Fatalf("expected tag %s, got %s", c.expectedTag, tag) - } - } -} diff --git a/vendor/github.com/docker/engine-api/types/stats.go b/vendor/github.com/docker/engine-api/types/stats.go deleted file mode 100644 index b420ebe7f6a..00000000000 --- a/vendor/github.com/docker/engine-api/types/stats.go +++ /dev/null @@ -1,115 +0,0 @@ -// Package types is used for API stability in the types and response to the -// consumers of the API stats endpoint. -package types - -import "time" - -// ThrottlingData stores CPU throttling stats of one running container -type ThrottlingData struct { - // Number of periods with throttling active - Periods uint64 `json:"periods"` - // Number of periods when the container hits its throttling limit. - ThrottledPeriods uint64 `json:"throttled_periods"` - // Aggregate time the container was throttled for in nanoseconds. 
- ThrottledTime uint64 `json:"throttled_time"` -} - -// CPUUsage stores All CPU stats aggregated since container inception. -type CPUUsage struct { - // Total CPU time consumed. - // Units: nanoseconds. - TotalUsage uint64 `json:"total_usage"` - // Total CPU time consumed per core. - // Units: nanoseconds. - PercpuUsage []uint64 `json:"percpu_usage"` - // Time spent by tasks of the cgroup in kernel mode. - // Units: nanoseconds. - UsageInKernelmode uint64 `json:"usage_in_kernelmode"` - // Time spent by tasks of the cgroup in user mode. - // Units: nanoseconds. - UsageInUsermode uint64 `json:"usage_in_usermode"` -} - -// CPUStats aggregates and wraps all CPU related info of container -type CPUStats struct { - CPUUsage CPUUsage `json:"cpu_usage"` - SystemUsage uint64 `json:"system_cpu_usage"` - ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` -} - -// MemoryStats aggregates All memory stats since container inception -type MemoryStats struct { - // current res_counter usage for memory - Usage uint64 `json:"usage"` - // maximum usage ever recorded. - MaxUsage uint64 `json:"max_usage"` - // TODO(vishh): Export these as stronger types. - // all the stats exported via memory.stat. - Stats map[string]uint64 `json:"stats"` - // number of times memory usage hits limits. - Failcnt uint64 `json:"failcnt"` - Limit uint64 `json:"limit"` -} - -// BlkioStatEntry is one small entity to store a piece of Blkio stats -// TODO Windows: This can be factored out -type BlkioStatEntry struct { - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` - Op string `json:"op"` - Value uint64 `json:"value"` -} - -// BlkioStats stores All IO service stats for data read and write -// TODO Windows: This can be factored out -type BlkioStats struct { - // number of bytes transferred to and from the block device - IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` - IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` - IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` - IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` - IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` - IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` - SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` -} - -// NetworkStats aggregates All network stats of one container -// TODO Windows: This will require refactoring -type NetworkStats struct { - RxBytes uint64 `json:"rx_bytes"` - RxPackets uint64 `json:"rx_packets"` - RxErrors uint64 `json:"rx_errors"` - RxDropped uint64 `json:"rx_dropped"` - TxBytes uint64 `json:"tx_bytes"` - TxPackets uint64 `json:"tx_packets"` - TxErrors uint64 `json:"tx_errors"` - TxDropped uint64 `json:"tx_dropped"` -} - -// PidsStats contains the stats of a container's pids -type PidsStats struct { - // Current is the number of pids in the cgroup - Current uint64 `json:"current,omitempty"` - // Limit is the hard limit on the number of pids in the cgroup. - // A "Limit" of 0 means that there is no limit. 
- Limit uint64 `json:"limit,omitempty"` -} - -// Stats is Ultimate struct aggregating all types of stats of one container -type Stats struct { - Read time.Time `json:"read"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` - CPUStats CPUStats `json:"cpu_stats,omitempty"` - MemoryStats MemoryStats `json:"memory_stats,omitempty"` - BlkioStats BlkioStats `json:"blkio_stats,omitempty"` - PidsStats PidsStats `json:"pids_stats,omitempty"` -} - -// StatsJSON is newly used Networks -type StatsJSON struct { - Stats - - // Networks request version >=1.21 - Networks map[string]NetworkStats `json:"networks,omitempty"` -} diff --git a/vendor/github.com/docker/engine-api/types/time/timestamp.go b/vendor/github.com/docker/engine-api/types/time/timestamp.go deleted file mode 100644 index d3695ba723b..00000000000 --- a/vendor/github.com/docker/engine-api/types/time/timestamp.go +++ /dev/null @@ -1,124 +0,0 @@ -package time - -import ( - "fmt" - "math" - "strconv" - "strings" - "time" -) - -// These are additional predefined layouts for use in Time.Format and Time.Parse -// with --since and --until parameters for `docker logs` and `docker events` -const ( - rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone - rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone - dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 - dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 -) - -// GetTimestamp tries to parse given string as golang duration, -// then RFC3339 time and finally as a Unix timestamp. If -// any of these were successful, it returns a Unix timestamp -// as string otherwise returns the given value back. -// In case of duration input, the returned timestamp is computed -// as the given reference time minus the amount of the duration. 
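// An illustrative sketch, not taken from the removed sources, of GetTimestamp's documented
// parse order -- duration first, then RFC3339, then a plain Unix timestamp passed through
// unchanged. Assumes a reference time ref; expected outputs follow the test table below:
//
//	s, _ := GetTimestamp("1h30m", ref)               // duration: ref minus 90 minutes, as Unix seconds
//	s, _ = GetTimestamp("2006-01-02T15:04:05Z", ref) // RFC3339: "1136214245.000000000"
//	s, _ = GetTimestamp("1136073600", ref)           // already a Unix timestamp: returned unchanged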
-func GetTimestamp(value string, reference time.Time) (string, error) { - if d, err := time.ParseDuration(value); value != "0" && err == nil { - return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil - } - - var format string - var parseInLocation bool - - // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation - parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) - - if strings.Contains(value, ".") { - if parseInLocation { - format = rFC3339NanoLocal - } else { - format = time.RFC3339Nano - } - } else if strings.Contains(value, "T") { - // we want the number of colons in the T portion of the timestamp - tcolons := strings.Count(value, ":") - // if parseInLocation is off and we have a +/- zone offset (not Z) then - // there will be an extra colon in the input for the tz offset subtract that - // colon from the tcolons count - if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { - tcolons-- - } - if parseInLocation { - switch tcolons { - case 0: - format = "2006-01-02T15" - case 1: - format = "2006-01-02T15:04" - default: - format = rFC3339Local - } - } else { - switch tcolons { - case 0: - format = "2006-01-02T15Z07:00" - case 1: - format = "2006-01-02T15:04Z07:00" - default: - format = time.RFC3339 - } - } - } else if parseInLocation { - format = dateLocal - } else { - format = dateWithZone - } - - var t time.Time - var err error - - if parseInLocation { - t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) - } else { - t, err = time.Parse(format, value) - } - - if err != nil { - // if there is a `-` then its an RFC3339 like timestamp otherwise assume unixtimestamp - if strings.Contains(value, "-") { - return "", err // was probably an RFC3339 like timestamp but the parser failed with an error - } - return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) - } - - return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil -} - -// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the -// format "%d.%09d", time.Unix(), int64(time.Nanosecond())) -// if the incoming nanosecond portion is longer or shorter than 9 digits it is -// converted to nanoseconds. The expectation is that the seconds and -// seconds will be used to create a time variable. 
For example: -// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) -// if err == nil since := time.Unix(seconds, nanoseconds) -// returns seconds as def(aultSeconds) if value == "" -func ParseTimestamps(value string, def int64) (int64, int64, error) { - if value == "" { - return def, 0, nil - } - sa := strings.SplitN(value, ".", 2) - s, err := strconv.ParseInt(sa[0], 10, 64) - if err != nil { - return s, 0, err - } - if len(sa) != 2 { - return s, 0, nil - } - n, err := strconv.ParseInt(sa[1], 10, 64) - if err != nil { - return s, n, err - } - // should already be in nanoseconds but just in case convert n to nanoseonds - n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) - return s, n, nil -} diff --git a/vendor/github.com/docker/engine-api/types/time/timestamp_test.go b/vendor/github.com/docker/engine-api/types/time/timestamp_test.go deleted file mode 100644 index a1651309d71..00000000000 --- a/vendor/github.com/docker/engine-api/types/time/timestamp_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package time - -import ( - "fmt" - "testing" - "time" -) - -func TestGetTimestamp(t *testing.T) { - now := time.Now().In(time.UTC) - cases := []struct { - in, expected string - expectedErr bool - }{ - // Partial RFC3339 strings get parsed with second precision - {"2006-01-02T15:04:05.999999999+07:00", "1136189045.999999999", false}, - {"2006-01-02T15:04:05.999999999Z", "1136214245.999999999", false}, - {"2006-01-02T15:04:05.999999999", "1136214245.999999999", false}, - {"2006-01-02T15:04:05Z", "1136214245.000000000", false}, - {"2006-01-02T15:04:05", "1136214245.000000000", false}, - {"2006-01-02T15:04:0Z", "", true}, - {"2006-01-02T15:04:0", "", true}, - {"2006-01-02T15:04Z", "1136214240.000000000", false}, - {"2006-01-02T15:04+00:00", "1136214240.000000000", false}, - {"2006-01-02T15:04-00:00", "1136214240.000000000", false}, - {"2006-01-02T15:04", "1136214240.000000000", false}, - {"2006-01-02T15:0Z", "", true}, - {"2006-01-02T15:0", "", true}, - {"2006-01-02T15Z", "1136214000.000000000", false}, - {"2006-01-02T15+00:00", "1136214000.000000000", false}, - {"2006-01-02T15-00:00", "1136214000.000000000", false}, - {"2006-01-02T15", "1136214000.000000000", false}, - {"2006-01-02T1Z", "1136163600.000000000", false}, - {"2006-01-02T1", "1136163600.000000000", false}, - {"2006-01-02TZ", "", true}, - {"2006-01-02T", "", true}, - {"2006-01-02+00:00", "1136160000.000000000", false}, - {"2006-01-02-00:00", "1136160000.000000000", false}, - {"2006-01-02-00:01", "1136160060.000000000", false}, - {"2006-01-02Z", "1136160000.000000000", false}, - {"2006-01-02", "1136160000.000000000", false}, - {"2015-05-13T20:39:09Z", "1431549549.000000000", false}, - - // unix timestamps returned as is - {"1136073600", "1136073600", false}, - {"1136073600.000000001", "1136073600.000000001", false}, - // Durations - {"1m", fmt.Sprintf("%d", now.Add(-1*time.Minute).Unix()), false}, - {"1.5h", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false}, - {"1h30m", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false}, - - // String fallback - {"invalid", "invalid", false}, - } - - for _, c := range cases { - o, err := GetTimestamp(c.in, now) - if o != c.expected || - (err == nil && c.expectedErr) || - (err != nil && !c.expectedErr) { - t.Errorf("wrong value for '%s'. 
expected:'%s' got:'%s' with error: `%s`", c.in, c.expected, o, err) - t.Fail() - } - } -} - -func TestParseTimestamps(t *testing.T) { - cases := []struct { - in string - def, expectedS, expectedN int64 - expectedErr bool - }{ - // unix timestamps - {"1136073600", 0, 1136073600, 0, false}, - {"1136073600.000000001", 0, 1136073600, 1, false}, - {"1136073600.0000000010", 0, 1136073600, 1, false}, - {"1136073600.00000001", 0, 1136073600, 10, false}, - {"foo.bar", 0, 0, 0, true}, - {"1136073600.bar", 0, 1136073600, 0, true}, - {"", -1, -1, 0, false}, - } - - for _, c := range cases { - s, n, err := ParseTimestamps(c.in, c.def) - if s != c.expectedS || - n != c.expectedN || - (err == nil && c.expectedErr) || - (err != nil && !c.expectedErr) { - t.Errorf("wrong values for input `%s` with default `%d` expected:'%d'seconds and `%d`nanosecond got:'%d'seconds and `%d`nanoseconds with error: `%s`", c.in, c.def, c.expectedS, c.expectedN, s, n, err) - t.Fail() - } - } -} diff --git a/vendor/github.com/docker/engine-api/types/types.go b/vendor/github.com/docker/engine-api/types/types.go deleted file mode 100644 index cb2dc9ac9d7..00000000000 --- a/vendor/github.com/docker/engine-api/types/types.go +++ /dev/null @@ -1,473 +0,0 @@ -package types - -import ( - "os" - "time" - - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/network" - "github.com/docker/engine-api/types/registry" - "github.com/docker/go-connections/nat" -) - -// ContainerCreateResponse contains the information returned to a client on the -// creation of a new container. -type ContainerCreateResponse struct { - // ID is the ID of the created container. - ID string `json:"Id"` - - // Warnings are any warnings encountered during the creation of the container. - Warnings []string `json:"Warnings"` -} - -// ContainerExecCreateResponse contains response of Remote API: -// POST "/containers/{name:.*}/exec" -type ContainerExecCreateResponse struct { - // ID is the exec ID. - ID string `json:"Id"` -} - -// ContainerUpdateResponse contains response of Remote API: -// POST /containers/{name:.*}/update -type ContainerUpdateResponse struct { - // Warnings are any warnings encountered during the updating of the container. - Warnings []string `json:"Warnings"` -} - -// AuthResponse contains response of Remote API: -// POST "/auth" -type AuthResponse struct { - // Status is the authentication status - Status string `json:"Status"` - - // IdentityToken is an opaque token used for authenticating - // a user after a successful login. 
- IdentityToken string `json:"IdentityToken,omitempty"` -} - -// ContainerWaitResponse contains response of Remote API: -// POST "/containers/"+containerID+"/wait" -type ContainerWaitResponse struct { - // StatusCode is the status code of the wait job - StatusCode int `json:"StatusCode"` -} - -// ContainerCommitResponse contains response of Remote API: -// POST "/commit?container="+containerID -type ContainerCommitResponse struct { - ID string `json:"Id"` -} - -// ContainerChange contains response of Remote API: -// GET "/containers/{name:.*}/changes" -type ContainerChange struct { - Kind int - Path string -} - -// ImageHistory contains response of Remote API: -// GET "/images/{name:.*}/history" -type ImageHistory struct { - ID string `json:"Id"` - Created int64 - CreatedBy string - Tags []string - Size int64 - Comment string -} - -// ImageDelete contains response of Remote API: -// DELETE "/images/{name:.*}" -type ImageDelete struct { - Untagged string `json:",omitempty"` - Deleted string `json:",omitempty"` -} - -// Image contains response of Remote API: -// GET "/images/json" -type Image struct { - ID string `json:"Id"` - ParentID string `json:"ParentId"` - RepoTags []string - RepoDigests []string - Created int64 - Size int64 - VirtualSize int64 - Labels map[string]string -} - -// GraphDriverData returns Image's graph driver config info -// when calling inspect command -type GraphDriverData struct { - Name string - Data map[string]string -} - -// RootFS returns Image's RootFS description including the layer IDs. -type RootFS struct { - Type string - Layers []string `json:",omitempty"` - BaseLayer string `json:",omitempty"` -} - -// ImageInspect contains response of Remote API: -// GET "/images/{name:.*}/json" -type ImageInspect struct { - ID string `json:"Id"` - RepoTags []string - RepoDigests []string - Parent string - Comment string - Created string - Container string - ContainerConfig *container.Config - DockerVersion string - Author string - Config *container.Config - Architecture string - Os string - Size int64 - VirtualSize int64 - GraphDriver GraphDriverData - RootFS RootFS -} - -// Port stores open ports info of container -// e.g. {"PrivatePort": 8080, "PublicPort": 80, "Type": "tcp"} -type Port struct { - IP string `json:",omitempty"` - PrivatePort int - PublicPort int `json:",omitempty"` - Type string -} - -// Container contains response of Remote API: -// GET "/containers/json" -type Container struct { - ID string `json:"Id"` - Names []string - Image string - ImageID string - Command string - Created int64 - Ports []Port - SizeRw int64 `json:",omitempty"` - SizeRootFs int64 `json:",omitempty"` - Labels map[string]string - State string - Status string - HostConfig struct { - NetworkMode string `json:",omitempty"` - } - NetworkSettings *SummaryNetworkSettings - Mounts []MountPoint -} - -// CopyConfig contains request body of Remote API: -// POST "/containers/"+containerID+"/copy" -type CopyConfig struct { - Resource string -} - -// ContainerPathStat is used to encode the header from -// GET "/containers/{name:.*}/archive" -// "Name" is the file or directory name. 
-type ContainerPathStat struct { - Name string `json:"name"` - Size int64 `json:"size"` - Mode os.FileMode `json:"mode"` - Mtime time.Time `json:"mtime"` - LinkTarget string `json:"linkTarget"` -} - -// ContainerProcessList contains response of Remote API: -// GET "/containers/{name:.*}/top" -type ContainerProcessList struct { - Processes [][]string - Titles []string -} - -// Version contains response of Remote API: -// GET "/version" -type Version struct { - Version string - APIVersion string `json:"ApiVersion"` - GitCommit string - GoVersion string - Os string - Arch string - KernelVersion string `json:",omitempty"` - Experimental bool `json:",omitempty"` - BuildTime string `json:",omitempty"` -} - -// Info contains response of Remote API: -// GET "/info" -type Info struct { - ID string - Containers int - ContainersRunning int - ContainersPaused int - ContainersStopped int - Images int - Driver string - DriverStatus [][2]string - SystemStatus [][2]string - Plugins PluginsInfo - MemoryLimit bool - SwapLimit bool - KernelMemory bool - CPUCfsPeriod bool `json:"CpuCfsPeriod"` - CPUCfsQuota bool `json:"CpuCfsQuota"` - CPUShares bool - CPUSet bool - IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` - Debug bool - NFd int - OomKillDisable bool - NGoroutines int - SystemTime string - ExecutionDriver string - LoggingDriver string - CgroupDriver string - NEventsListener int - KernelVersion string - OperatingSystem string - OSType string - Architecture string - IndexServerAddress string - RegistryConfig *registry.ServiceConfig - NCPU int - MemTotal int64 - DockerRootDir string - HTTPProxy string `json:"HttpProxy"` - HTTPSProxy string `json:"HttpsProxy"` - NoProxy string - Name string - Labels []string - ExperimentalBuild bool - ServerVersion string - ClusterStore string - ClusterAdvertise string - SecurityOptions []string -} - -// PluginsInfo is a temp struct holding Plugins name -// registered with docker daemon. It is used by Info struct -type PluginsInfo struct { - // List of Volume plugins registered - Volume []string - // List of Network plugins registered - Network []string - // List of Authorization plugins registered - Authorization []string -} - -// ExecStartCheck is a temp struct used by execStart -// Config fields is part of ExecConfig in runconfig package -type ExecStartCheck struct { - // ExecStart will first check if it's detached - Detach bool - // Check if there's a tty - Tty bool -} - -// ContainerState stores container's running state -// it's part of ContainerJSONBase and will return by "inspect" command -type ContainerState struct { - Status string - Running bool - Paused bool - Restarting bool - OOMKilled bool - Dead bool - Pid int - ExitCode int - Error string - StartedAt string - FinishedAt string -} - -// ContainerNode stores information about the node that a container -// is running on. 
It's only available in Docker Swarm -type ContainerNode struct { - ID string - IPAddress string `json:"IP"` - Addr string - Name string - Cpus int - Memory int - Labels map[string]string -} - -// ContainerJSONBase contains response of Remote API: -// GET "/containers/{name:.*}/json" -type ContainerJSONBase struct { - ID string `json:"Id"` - Created string - Path string - Args []string - State *ContainerState - Image string - ResolvConfPath string - HostnamePath string - HostsPath string - LogPath string - Node *ContainerNode `json:",omitempty"` - Name string - RestartCount int - Driver string - MountLabel string - ProcessLabel string - AppArmorProfile string - ExecIDs []string - HostConfig *container.HostConfig - GraphDriver GraphDriverData - SizeRw *int64 `json:",omitempty"` - SizeRootFs *int64 `json:",omitempty"` -} - -// ContainerJSON is newly used struct along with MountPoint -type ContainerJSON struct { - *ContainerJSONBase - Mounts []MountPoint - Config *container.Config - NetworkSettings *NetworkSettings -} - -// NetworkSettings exposes the network settings in the api -type NetworkSettings struct { - NetworkSettingsBase - DefaultNetworkSettings - Networks map[string]*network.EndpointSettings -} - -// SummaryNetworkSettings provides a summary of container's networks -// in /containers/json -type SummaryNetworkSettings struct { - Networks map[string]*network.EndpointSettings -} - -// NetworkSettingsBase holds basic information about networks -type NetworkSettingsBase struct { - Bridge string - SandboxID string - HairpinMode bool - LinkLocalIPv6Address string - LinkLocalIPv6PrefixLen int - Ports nat.PortMap - SandboxKey string - SecondaryIPAddresses []network.Address - SecondaryIPv6Addresses []network.Address -} - -// DefaultNetworkSettings holds network information -// during the 2 release deprecation period. -// It will be removed in Docker 1.11. -type DefaultNetworkSettings struct { - EndpointID string - Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - IPAddress string - IPPrefixLen int - IPv6Gateway string - MacAddress string -} - -// MountPoint represents a mount point configuration inside the container. -type MountPoint struct { - Name string `json:",omitempty"` - Source string - Destination string - Driver string `json:",omitempty"` - Mode string - RW bool - Propagation string -} - -// Volume represents the configuration of a volume for the remote API -type Volume struct { - Name string // Name is the name of the volume - Driver string // Driver is the Driver name used to create the volume - Mountpoint string // Mountpoint is the location on disk of the volume - Status map[string]interface{} `json:",omitempty"` // Status provides low-level status information about the volume - Labels map[string]string // Labels is metadata specific to the volume - Scope string // Scope describes the level at which the volume exists (e.g. 
`global` for cluster-wide or `local` for machine level) -} - -// VolumesListResponse contains the response for the remote API: -// GET "/volumes" -type VolumesListResponse struct { - Volumes []*Volume // Volumes is the list of volumes being returned - Warnings []string // Warnings is a list of warnings that occurred when getting the list from the volume drivers -} - -// VolumeCreateRequest contains the response for the remote API: -// POST "/volumes/create" -type VolumeCreateRequest struct { - Name string // Name is the requested name of the volume - Driver string // Driver is the name of the driver that should be used to create the volume - DriverOpts map[string]string // DriverOpts holds the driver specific options to use for when creating the volume. - Labels map[string]string // Labels holds metadata specific to the volume being created. -} - -// NetworkResource is the body of the "get network" http response message -type NetworkResource struct { - Name string - ID string `json:"Id"` - Scope string - Driver string - EnableIPv6 bool - IPAM network.IPAM - Internal bool - Containers map[string]EndpointResource - Options map[string]string - Labels map[string]string -} - -// EndpointResource contains network resources allocated and used for a container in a network -type EndpointResource struct { - Name string - EndpointID string - MacAddress string - IPv4Address string - IPv6Address string -} - -// NetworkCreate is the expected body of the "create network" http request message -type NetworkCreate struct { - CheckDuplicate bool - Driver string - EnableIPv6 bool - IPAM network.IPAM - Internal bool - Options map[string]string - Labels map[string]string -} - -// NetworkCreateRequest is the request message sent to the server for network create call. -type NetworkCreateRequest struct { - NetworkCreate - Name string -} - -// NetworkCreateResponse is the response message sent by the server for network create call -type NetworkCreateResponse struct { - ID string `json:"Id"` - Warning string -} - -// NetworkConnect represents the data to be used to connect a container to the network -type NetworkConnect struct { - Container string - EndpointConfig *network.EndpointSettings `json:",omitempty"` -} - -// NetworkDisconnect represents the data to be used to disconnect a container from the network -type NetworkDisconnect struct { - Container string - Force bool -} diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go index 3d469165ab5..4d5f5ae63af 100644 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -85,14 +85,10 @@ func (p Port) Port() string { // Int returns the port number of a Port as an int func (p Port) Int() int { portStr := p.Port() - if len(portStr) == 0 { - return 0 - } - // We don't need to check for an error because we're going to // assume that any error would have been found, and reported, in NewPort() - port, _ := strconv.ParseUint(portStr, 10, 16) - return int(port) + port, _ := ParsePort(portStr) + return port } // Range returns the start/end port numbers of a Port range as ints @@ -132,92 +128,115 @@ func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, exposedPorts = make(map[Port]struct{}, len(ports)) bindings = make(map[Port][]PortBinding) ) - for _, rawPort := range ports { - proto := "tcp" - - if i := strings.LastIndex(rawPort, "/"); i != -1 { - proto = rawPort[i+1:] - rawPort = rawPort[:i] - } - if !strings.Contains(rawPort, 
":") { - rawPort = fmt.Sprintf("::%s", rawPort) - } else if len(strings.Split(rawPort, ":")) == 2 { - rawPort = fmt.Sprintf(":%s", rawPort) - } - - parts, err := PartParser(portSpecTemplate, rawPort) + portMappings, err := ParsePortSpec(rawPort) if err != nil { return nil, nil, err } - var ( - containerPort = parts["containerPort"] - rawIP = parts["ip"] - hostPort = parts["hostPort"] - ) - - if rawIP != "" && net.ParseIP(rawIP) == nil { - return nil, nil, fmt.Errorf("Invalid ip address: %s", rawIP) - } - if containerPort == "" { - return nil, nil, fmt.Errorf("No port specified: %s", rawPort) + for _, portMapping := range portMappings { + port := portMapping.Port + if _, exists := exposedPorts[port]; !exists { + exposedPorts[port] = struct{}{} + } + bslice, exists := bindings[port] + if !exists { + bslice = []PortBinding{} + } + bindings[port] = append(bslice, portMapping.Binding) } + } + return exposedPorts, bindings, nil +} + +// PortMapping is a data object mapping a Port to a PortBinding +type PortMapping struct { + Port Port + Binding PortBinding +} + +func splitParts(rawport string) (string, string, string) { + parts := strings.Split(rawport, ":") + n := len(parts) + containerport := parts[n-1] + + switch n { + case 1: + return "", "", containerport + case 2: + return "", parts[0], containerport + case 3: + return parts[0], parts[1], containerport + default: + return strings.Join(parts[:n-2], ":"), parts[n-2], containerport + } +} + +// ParsePortSpec parses a port specification string into a slice of PortMappings +func ParsePortSpec(rawPort string) ([]PortMapping, error) { + var proto string + rawIP, hostPort, containerPort := splitParts(rawPort) + proto, containerPort = SplitProtoPort(containerPort) + + // Strip [] from IPV6 addresses + ip, _, err := net.SplitHostPort(rawIP + ":") + if err != nil { + return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) + } + if ip != "" && net.ParseIP(ip) == nil { + return nil, fmt.Errorf("Invalid ip address: %s", ip) + } + if containerPort == "" { + return nil, fmt.Errorf("No port specified: %s", rawPort) + } + + startPort, endPort, err := ParsePortRange(containerPort) + if err != nil { + return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + } - startPort, endPort, err := ParsePortRange(containerPort) + var startHostPort, endHostPort uint64 = 0, 0 + if len(hostPort) > 0 { + startHostPort, endHostPort, err = ParsePortRange(hostPort) if err != nil { - return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) } + } - var startHostPort, endHostPort uint64 = 0, 0 - if len(hostPort) > 0 { - startHostPort, endHostPort, err = ParsePortRange(hostPort) - if err != nil { - return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort) - } + if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { + // Allow host port range iff containerPort is not a range. + // In this case, use the host port range as the dynamic + // host port range to allocate into. + if endPort != startPort { + return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) } + } - if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { - // Allow host port range iff containerPort is not a range. - // In this case, use the host port range as the dynamic - // host port range to allocate into. 
- if endPort != startPort { - return nil, nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) - } - } + if !validateProto(strings.ToLower(proto)) { + return nil, fmt.Errorf("Invalid proto: %s", proto) + } - if !validateProto(strings.ToLower(proto)) { - return nil, nil, fmt.Errorf("Invalid proto: %s", proto) + ports := []PortMapping{} + for i := uint64(0); i <= (endPort - startPort); i++ { + containerPort = strconv.FormatUint(startPort+i, 10) + if len(hostPort) > 0 { + hostPort = strconv.FormatUint(startHostPort+i, 10) + } + // Set hostPort to a range only if there is a single container port + // and a dynamic host port. + if startPort == endPort && startHostPort != endHostPort { + hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) + } + port, err := NewPort(strings.ToLower(proto), containerPort) + if err != nil { + return nil, err } - for i := uint64(0); i <= (endPort - startPort); i++ { - containerPort = strconv.FormatUint(startPort+i, 10) - if len(hostPort) > 0 { - hostPort = strconv.FormatUint(startHostPort+i, 10) - } - // Set hostPort to a range only if there is a single container port - // and a dynamic host port. - if startPort == endPort && startHostPort != endHostPort { - hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) - } - port, err := NewPort(strings.ToLower(proto), containerPort) - if err != nil { - return nil, nil, err - } - if _, exists := exposedPorts[port]; !exists { - exposedPorts[port] = struct{}{} - } - - binding := PortBinding{ - HostIP: rawIP, - HostPort: hostPort, - } - bslice, exists := bindings[port] - if !exists { - bslice = []PortBinding{} - } - bindings[port] = append(bslice, binding) + binding := PortBinding{ + HostIP: ip, + HostPort: hostPort, } + ports = append(ports, PortMapping{Port: port, Binding: binding}) } - return exposedPorts, bindings, nil + return ports, nil } diff --git a/vendor/github.com/docker/go-connections/nat/nat_test.go b/vendor/github.com/docker/go-connections/nat/nat_test.go index 651dcd307d0..787d5ac2338 100644 --- a/vendor/github.com/docker/go-connections/nat/nat_test.go +++ b/vendor/github.com/docker/go-connections/nat/nat_test.go @@ -2,6 +2,8 @@ package nat import ( "testing" + + "github.com/stretchr/testify/assert" ) func TestParsePort(t *testing.T) { @@ -171,6 +173,62 @@ func TestSplitProtoPort(t *testing.T) { } } +func TestParsePortSpecFull(t *testing.T) { + portMappings, err := ParsePortSpec("0.0.0.0:1234-1235:3333-3334/tcp") + assert.Nil(t, err) + + expected := []PortMapping{ + { + Port: "3333/tcp", + Binding: PortBinding{ + HostIP: "0.0.0.0", + HostPort: "1234", + }, + }, + { + Port: "3334/tcp", + Binding: PortBinding{ + HostIP: "0.0.0.0", + HostPort: "1235", + }, + }, + } + + assert.Equal(t, expected, portMappings) +} + +func TestPartPortSpecIPV6(t *testing.T) { + portMappings, err := ParsePortSpec("[2001:4860:0:2001::68]::333") + assert.Nil(t, err) + + expected := []PortMapping{ + { + Port: "333/tcp", + Binding: PortBinding{ + HostIP: "2001:4860:0:2001::68", + HostPort: "", + }, + }, + } + assert.Equal(t, expected, portMappings) +} + +func TestPartPortSpecIPV6WithHostPort(t *testing.T) { + portMappings, err := ParsePortSpec("[::1]:80:80") + assert.Nil(t, err) + + expected := []PortMapping{ + { + Port: "80/tcp", + Binding: PortBinding{ + HostIP: "::1", + HostPort: "80", + }, + }, + } + assert.Equal(t, expected, portMappings) +} + func TestParsePortSpecs(t *testing.T) { var ( portMap map[Port]struct{} diff --git 
a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go index 872050205f4..892adf8c667 100644 --- a/vendor/github.com/docker/go-connections/nat/parse.go +++ b/vendor/github.com/docker/go-connections/nat/parse.go @@ -8,6 +8,7 @@ import ( // PartParser parses and validates the specified string (data) using the specified template // e.g. ip:public:private -> 192.168.0.1:80:8000 +// DEPRECATED: do not use, this function may be removed in a future version func PartParser(template, data string) (map[string]string, error) { // ip:public:private var ( diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go index 3395e40229d..99846ffddb1 100644 --- a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go +++ b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go @@ -79,11 +79,3 @@ func (a dummyAddr) Network() string { func (a dummyAddr) String() string { return string(a) } - -// timeoutError is used when there is a timeout with a connection -// this implements the net.Error interface -type timeoutError struct{} - -func (e *timeoutError) Error() string { return "i/o timeout" } -func (e *timeoutError) Timeout() bool { return true } -func (e *timeoutError) Temporary() bool { return true } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go index 1739cecf2a5..a1d7beb4d80 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets.go @@ -2,6 +2,7 @@ package sockets import ( + "errors" "net" "net/http" "time" @@ -10,6 +11,9 @@ import ( // Why 32? See https://github.com/docker/docker/pull/8035. const defaultTimeout = 32 * time.Second +// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system. +var ErrProtocolNotAvailable = errors.New("protocol not available") + // ConfigureTransport configures the specified Transport according to the // specified proto and addr. // If the proto is unix (using a unix socket to communicate) or npipe the @@ -17,17 +21,9 @@ const defaultTimeout = 32 * time.Second func ConfigureTransport(tr *http.Transport, proto, addr string) error { switch proto { case "unix": - // No need for compression in local communications. - tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return net.DialTimeout(proto, addr, defaultTimeout) - } + return configureUnixTransport(tr, proto, addr) case "npipe": - // No need for compression in local communications. 
- tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return DialPipe(addr, defaultTimeout) - } + return configureNpipeTransport(tr, proto, addr) default: tr.Proxy = http.ProxyFromEnvironment dialer, err := DialerFromEnvironment(&net.Dialer{ diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go index b255ac9ac7a..386cf0dbbde 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go @@ -3,11 +3,31 @@ package sockets import ( + "fmt" "net" + "net/http" "syscall" "time" ) +const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) + +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + if len(addr) > maxUnixSocketPathSize { + return fmt.Errorf("Unix socket path %q is too long", addr) + } + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return net.DialTimeout(proto, addr, defaultTimeout) + } + return nil +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + // DialPipe connects to a Windows named pipe. // This is not supported on other OSes. func DialPipe(_ string, _ time.Duration) (net.Conn, error) { diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go index 1f3540b2fe8..5c21644e1fe 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go @@ -2,11 +2,25 @@ package sockets import ( "net" + "net/http" "time" "github.com/Microsoft/go-winio" ) +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return DialPipe(addr, defaultTimeout) + } + return nil +} + // DialPipe connects to a Windows named pipe. func DialPipe(addr string, timeout time.Duration) (net.Conn, error) { return winio.DialPipe(addr, &timeout) diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go index 8a82727df00..53cbb6c79e4 100644 --- a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go +++ b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go @@ -7,7 +7,7 @@ import ( ) // NewTCPSocket creates a TCP socket listener with the specified address and -// and the specified tls configuration. If TLSConfig is set, will encapsulate the +// the specified tls configuration. If TLSConfig is set, will encapsulate the // TCP listener inside a TLS one. 
func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) { l, err := net.Listen("tcp", addr) diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go index d1627349f82..a8b5dbb6fdc 100644 --- a/vendor/github.com/docker/go-connections/sockets/unix_socket.go +++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go @@ -1,30 +1,26 @@ -// +build linux freebsd solaris +// +build !windows package sockets import ( - "fmt" "net" "os" - "strconv" "syscall" - - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runc/libcontainer/user" ) // NewUnixSocket creates a unix socket with the specified path and group. -func NewUnixSocket(path, group string) (net.Listener, error) { +func NewUnixSocket(path string, gid int) (net.Listener, error) { if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { return nil, err } mask := syscall.Umask(0777) defer syscall.Umask(mask) + l, err := net.Listen("unix", path) if err != nil { return nil, err } - if err := setSocketGroup(path, group); err != nil { + if err := os.Chown(path, 0, gid); err != nil { l.Close() return nil, err } @@ -34,47 +30,3 @@ func NewUnixSocket(path, group string) (net.Listener, error) { } return l, nil } - -func setSocketGroup(path, group string) error { - if group == "" { - return nil - } - if err := changeGroup(path, group); err != nil { - if group != "docker" { - return err - } - logrus.Debugf("Warning: could not change group %s to docker: %v", path, err) - } - return nil -} - -func changeGroup(path string, nameOrGid string) error { - gid, err := lookupGidByName(nameOrGid) - if err != nil { - return err - } - logrus.Debugf("%s group found. gid: %d", nameOrGid, gid) - return os.Chown(path, 0, gid) -} - -func lookupGidByName(nameOrGid string) (int, error) { - groupFile, err := user.GetGroupPath() - if err != nil { - return -1, err - } - groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool { - return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid - }) - if err != nil { - return -1, err - } - if groups != nil && len(groups) > 0 { - return groups[0].Gid, nil - } - gid, err := strconv.Atoi(nameOrGid) - if err == nil { - logrus.Warnf("Could not find GID %d", gid) - return gid, nil - } - return -1, fmt.Errorf("Group %s not found", nameOrGid) -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go new file mode 100644 index 00000000000..1ca0965e06e --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go @@ -0,0 +1,18 @@ +// +build go1.7 + +package tlsconfig + +import ( + "crypto/x509" + "runtime" +) + +// SystemCertPool returns a copy of the system cert pool, +// returns an error if failed to load or empty pool on windows. 
+func SystemCertPool() (*x509.CertPool, error) { + certpool, err := x509.SystemCertPool() + if err != nil && runtime.GOOS == "windows" { + return x509.NewCertPool(), nil + } + return certpool, err +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go new file mode 100644 index 00000000000..9ca974539a7 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go @@ -0,0 +1,14 @@ +// +build !go1.7 + +package tlsconfig + +import ( + "crypto/x509" + +) + +// SystemCertPool returns an new empty cert pool, +// accessing system cert pool is supported in go 1.7 +func SystemCertPool() (*x509.CertPool, error) { + return x509.NewCertPool(), nil +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go index 1ba04395e2a..1b31bbb8b1b 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -8,11 +8,12 @@ package tlsconfig import ( "crypto/tls" "crypto/x509" + "encoding/pem" "fmt" "io/ioutil" "os" - "github.com/Sirupsen/logrus" + "github.com/pkg/errors" ) // Options represents the information needed to create client and server TLS configurations. @@ -29,6 +30,14 @@ type Options struct { InsecureSkipVerify bool // server-only option ClientAuth tls.ClientAuthType + // If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS + // creds will include exclusively the roots in that CA file. If no CA file is provided, + // the system pool will be used. + ExclusiveRootPools bool + MinVersion uint16 + // If Passphrase is set, it will be used to decrypt a TLS private key + // if the key is encrypted + Passphrase string } // Extra (server-side) accepted CBC cipher suites - will phase out in the future @@ -46,62 +55,170 @@ var acceptedCBCCiphers = []uint16{ // known weak algorithms removed. var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) -// ServerDefault is a secure-enough TLS configuration for the server TLS configuration. -var ServerDefault = tls.Config{ - // Avoid fallback to SSL protocols < TLS1.0 - MinVersion: tls.VersionTLS10, - PreferServerCipherSuites: true, - CipherSuites: DefaultServerAcceptedCiphers, +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. +var allTLSVersions = map[uint16]struct{}{ + tls.VersionSSL30: {}, + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, } -// ClientDefault is a secure-enough TLS configuration for the client TLS configuration. -var ClientDefault = tls.Config{ - // Prefer TLS1.2 as the client minimum - MinVersion: tls.VersionTLS12, - CipherSuites: clientCipherSuites, +// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. +func ServerDefault() *tls.Config { + return &tls.Config{ + // Avoid fallback to SSL protocols < TLS1.0 + MinVersion: tls.VersionTLS10, + PreferServerCipherSuites: true, + CipherSuites: DefaultServerAcceptedCiphers, + } +} + +// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. +func ClientDefault() *tls.Config { + return &tls.Config{ + // Prefer TLS1.2 as the client minimum + MinVersion: tls.VersionTLS12, + CipherSuites: clientCipherSuites, + } } // certPool returns an X.509 certificate pool from `caFile`, the certificate file. 
-func certPool(caFile string) (*x509.CertPool, error) { +func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { // If we should verify the server, we need to load a trusted ca - certPool := x509.NewCertPool() + var ( + certPool *x509.CertPool + err error + ) + if exclusivePool { + certPool = x509.NewCertPool() + } else { + certPool, err = SystemCertPool() + if err != nil { + return nil, fmt.Errorf("failed to read system certificates: %v", err) + } + } pem, err := ioutil.ReadFile(caFile) if err != nil { - return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err) + return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) } if !certPool.AppendCertsFromPEM(pem) { return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) } - logrus.Debugf("Trusting %d certs", len(certPool.Subjects())) return certPool, nil } +// isValidMinVersion checks that the input value is a valid tls minimum version +func isValidMinVersion(version uint16) bool { + _, ok := allTLSVersions[version] + return ok +} + +// adjustMinVersion sets the MinVersion on `config`, the input configuration. +// It assumes the current MinVersion on the `config` is the lowest allowed. +func adjustMinVersion(options Options, config *tls.Config) error { + if options.MinVersion > 0 { + if !isValidMinVersion(options.MinVersion) { + return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion) + } + if options.MinVersion < config.MinVersion { + return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion) + } + config.MinVersion = options.MinVersion + } + + return nil +} + +// IsErrEncryptedKey returns true if the 'err' is an error of incorrect +// password when tryin to decrypt a TLS private key +func IsErrEncryptedKey(err error) bool { + return errors.Cause(err) == x509.IncorrectPasswordError +} + +// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format. +// If the private key is encrypted, 'passphrase' is used to decrypted the +// private key. +func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { + // this section makes some small changes to code from notary/tuf/utils/x509.go + pemBlock, _ := pem.Decode(keyBytes) + if pemBlock == nil { + return nil, fmt.Errorf("no valid private key found") + } + + var err error + if x509.IsEncryptedPEMBlock(pemBlock) { + keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) + if err != nil { + return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it") + } + keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes}) + } + + return keyBytes, nil +} + +// getCert returns a Certificate from the CertFile and KeyFile in 'options', +// if the key is encrypted, the Passphrase in 'options' will be used to +// decrypt it. 
+func getCert(options Options) ([]tls.Certificate, error) { + if options.CertFile == "" && options.KeyFile == "" { + return nil, nil + } + + errMessage := "Could not load X509 key pair" + + cert, err := ioutil.ReadFile(options.CertFile) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + prKeyBytes, err := ioutil.ReadFile(options.KeyFile) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + tlsCert, err := tls.X509KeyPair(cert, prKeyBytes) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + return []tls.Certificate{tlsCert}, nil +} + // Client returns a TLS configuration meant to be used by a client. func Client(options Options) (*tls.Config, error) { - tlsConfig := ClientDefault + tlsConfig := ClientDefault() tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify if !options.InsecureSkipVerify && options.CAFile != "" { - CAs, err := certPool(options.CAFile) + CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) if err != nil { return nil, err } tlsConfig.RootCAs = CAs } - if options.CertFile != "" || options.KeyFile != "" { - tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) - if err != nil { - return nil, fmt.Errorf("Could not load X509 key pair: %v. Make sure the key is not encrypted", err) - } - tlsConfig.Certificates = []tls.Certificate{tlsCert} + tlsCerts, err := getCert(options) + if err != nil { + return nil, err + } + tlsConfig.Certificates = tlsCerts + + if err := adjustMinVersion(options, tlsConfig); err != nil { + return nil, err } - return &tlsConfig, nil + return tlsConfig, nil } // Server returns a TLS configuration meant to be used by a server. func Server(options Options) (*tls.Config, error) { - tlsConfig := ServerDefault + tlsConfig := ServerDefault() tlsConfig.ClientAuth = options.ClientAuth tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) if err != nil { @@ -111,12 +228,17 @@ func Server(options Options) (*tls.Config, error) { return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. 
Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) } tlsConfig.Certificates = []tls.Certificate{tlsCert} - if options.ClientAuth >= tls.VerifyClientCertIfGiven { - CAs, err := certPool(options.CAFile) + if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" { + CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) if err != nil { return nil, err } tlsConfig.ClientCAs = CAs } - return &tlsConfig, nil + + if err := adjustMinVersion(options, tlsConfig); err != nil { + return nil, err + } + + return tlsConfig, nil } diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_test.go b/vendor/github.com/docker/go-connections/tlsconfig/config_test.go index e0d2d6b8076..02131d6b8c6 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config_test.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_test.go @@ -2,120 +2,75 @@ package tlsconfig import ( "bytes" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" "crypto/tls" "crypto/x509" - "crypto/x509/pkix" "encoding/pem" - "io" "io/ioutil" - "math/big" "os" - "path/filepath" "reflect" "testing" - "time" ) -var certTemplate = x509.Certificate{ - SerialNumber: big.NewInt(199999), - Subject: pkix.Name{ - CommonName: "test", - }, - NotBefore: time.Now().AddDate(-1, 1, 1), - NotAfter: time.Now().AddDate(1, 1, 1), - - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning}, +// This is the currently active LetsEncrypt IdenTrust cross-signed CA cert. It expires Mar 17, 2021. +const ( + systemRootTrustedCert = ` +-----BEGIN CERTIFICATE----- +MIIEkjCCA3qgAwIBAgIQCgFBQgAAAVOFc2oLheynCDANBgkqhkiG9w0BAQsFADA/ +MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT +DkRTVCBSb290IENBIFgzMB4XDTE2MDMxNzE2NDA0NloXDTIxMDMxNzE2NDA0Nlow +SjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUxldCdzIEVuY3J5cHQxIzAhBgNVBAMT +GkxldCdzIEVuY3J5cHQgQXV0aG9yaXR5IFgzMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAnNMM8FrlLke3cl03g7NoYzDq1zUmGSXhvb418XCSL7e4S0EF +q6meNQhY7LEqxGiHC6PjdeTm86dicbp5gWAf15Gan/PQeGdxyGkOlZHP/uaZ6WA8 +SMx+yk13EiSdRxta67nsHjcAHJyse6cF6s5K671B5TaYucv9bTyWaN8jKkKQDIZ0 +Z8h/pZq4UmEUEz9l6YKHy9v6Dlb2honzhT+Xhq+w3Brvaw2VFn3EK6BlspkENnWA +a6xK8xuQSXgvopZPKiAlKQTGdMDQMc2PMTiVFrqoM7hD8bEfwzB/onkxEz0tNvjj +/PIzark5McWvxI0NHWQWM6r6hCm21AvA2H3DkwIDAQABo4IBfTCCAXkwEgYDVR0T +AQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwfwYIKwYBBQUHAQEEczBxMDIG +CCsGAQUFBzABhiZodHRwOi8vaXNyZy50cnVzdGlkLm9jc3AuaWRlbnRydXN0LmNv +bTA7BggrBgEFBQcwAoYvaHR0cDovL2FwcHMuaWRlbnRydXN0LmNvbS9yb290cy9k +c3Ryb290Y2F4My5wN2MwHwYDVR0jBBgwFoAUxKexpHsscfrb4UuQdf/EFWCFiRAw +VAYDVR0gBE0wSzAIBgZngQwBAgEwPwYLKwYBBAGC3xMBAQEwMDAuBggrBgEFBQcC +ARYiaHR0cDovL2Nwcy5yb290LXgxLmxldHNlbmNyeXB0Lm9yZzA8BgNVHR8ENTAz +MDGgL6AthitodHRwOi8vY3JsLmlkZW50cnVzdC5jb20vRFNUUk9PVENBWDNDUkwu +Y3JsMB0GA1UdDgQWBBSoSmpjBH3duubRObemRWXv86jsoTANBgkqhkiG9w0BAQsF +AAOCAQEA3TPXEfNjWDjdGBX7CVW+dla5cEilaUcne8IkCJLxWh9KEik3JHRRHGJo +uM2VcGfl96S8TihRzZvoroed6ti6WqEBmtzw3Wodatg+VyOeph4EYpr/1wXKtx8/ +wApIvJSwtmVi4MFU5aMqrSDE6ea73Mj2tcMyo5jMd6jmeWUHK8so/joWUoHOUgwu +X4Po1QYz+3dszkDqMp4fklxBwXRsW10KXzPMTZ+sOPAveyxindmjkW8lGy+QsRlG +PfZ+G6Z6h7mjem0Y+iWlkYcV4PIWL1iwBi8saCbGS5jN2p8M+X+Q7UNKEkROb3N6 +KOqkqm57TH2H3eDJAkSnh6/DNFu0Qg== +-----END CERTIFICATE----- +` + rsaPrivateKeyFile = "fixtures/key.pem" + certificateFile = "fixtures/cert.pem" + multiCertificateFile = "fixtures/multi.pem" + rsaEncryptedPrivateKeyFile = "fixtures/encrypted_key.pem" + 
certificateOfEncryptedKeyFile = "fixtures/cert_of_encrypted_key.pem" +) - BasicConstraintsValid: true, +// returns the name of a pre-generated, multiple-certificate CA file +// with both RSA and ECDSA certs. +func getMultiCert() string { + return multiCertificateFile } -func generateCertificate(t *testing.T, signer crypto.Signer, out io.Writer) { - derBytes, err := x509.CreateCertificate(rand.Reader, &certTemplate, &certTemplate, signer.Public(), signer) - if err != nil { - t.Fatal("Unable to generate a certificate", err.Error()) - } - - if err = pem.Encode(out, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { - t.Fatal("Unable to write cert to file", err.Error()) - } +// returns the names of pre-generated key and certificate files. +func getCertAndKey() (string, string) { + return rsaPrivateKeyFile, certificateFile } -// generates a multiple-certificate file with both RSA and ECDSA certs and -// returns the filename so that cleanup can be deferred. -func generateMultiCert(t *testing.T, tempDir string) string { - certOut, err := os.Create(filepath.Join(tempDir, "multi")) - if err != nil { - t.Fatal("Unable to create file to write multi-cert to", err.Error()) - } - defer certOut.Close() - - rsaKey, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Fatal("Unable to generate RSA key for multi-cert", err.Error()) - } - ecKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - t.Fatal("Unable to generate ECDSA key for multi-cert", err.Error()) - } - - for _, signer := range []crypto.Signer{rsaKey, ecKey} { - generateCertificate(t, signer, certOut) - } - - return certOut.Name() -} - -func generateCertAndKey(t *testing.T, tempDir string) (string, string) { - rsaKey, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Fatal("Unable to generate RSA key", err.Error()) - - } - keyBytes := x509.MarshalPKCS1PrivateKey(rsaKey) - - keyOut, err := os.Create(filepath.Join(tempDir, "key")) - if err != nil { - t.Fatal("Unable to create file to write key to", err.Error()) - - } - defer keyOut.Close() - - if err = pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: keyBytes}); err != nil { - t.Fatal("Unable to write key to file", err.Error()) - } - - certOut, err := os.Create(filepath.Join(tempDir, "cert")) - if err != nil { - t.Fatal("Unable to create file to write cert to", err.Error()) - } - defer certOut.Close() - - generateCertificate(t, rsaKey, certOut) - - return keyOut.Name(), certOut.Name() -} - -func makeTempDir(t *testing.T) string { - tempDir, err := ioutil.TempDir("", "tlsconfig-test") - if err != nil { - t.Fatal("Could not make a temporary directory", err.Error()) - } - return tempDir +// returns the names of pre-generated, encrypted private key and +// corresponding certificate file +func getCertAndEncryptedKey() (string, string) { + return rsaEncryptedPrivateKeyFile, certificateOfEncryptedKeyFile } // If the cert files and directory are provided but are invalid, an error is // returned. 
func TestConfigServerTLSFailsIfUnableToLoadCerts(t *testing.T) { - tempDir := makeTempDir(t) - defer os.RemoveAll(tempDir) - key, cert := generateCertAndKey(t, tempDir) - ca := generateMultiCert(t, tempDir) + key, cert := getCertAndKey() + ca := getMultiCert() tempFile, err := ioutil.TempFile("", "cert-test") if err != nil { @@ -145,9 +100,7 @@ func TestConfigServerTLSFailsIfUnableToLoadCerts(t *testing.T) { // If server cert and key are provided and client auth and client CA are not // set, a tls config with only the server certs will be returned. func TestConfigServerTLSServerCertsOnly(t *testing.T) { - tempDir := makeTempDir(t) - defer os.RemoveAll(tempDir) - key, cert := generateCertAndKey(t, tempDir) + key, cert := getCertAndKey() keypair, err := tls.LoadX509KeyPair(cert, key) if err != nil { @@ -188,10 +141,8 @@ func TestConfigServerTLSServerCertsOnly(t *testing.T) { // If client CA is provided, it will only be used if the client auth is >= // VerifyClientCertIfGiven func TestConfigServerTLSClientCANotSetIfClientAuthTooLow(t *testing.T) { - tempDir := makeTempDir(t) - defer os.RemoveAll(tempDir) - key, cert := generateCertAndKey(t, tempDir) - ca := generateMultiCert(t, tempDir) + key, cert := getCertAndKey() + ca := getMultiCert() tlsConfig, err := Server(Options{ CertFile: cert, @@ -218,10 +169,8 @@ func TestConfigServerTLSClientCANotSetIfClientAuthTooLow(t *testing.T) { // If client CA is provided, it will only be used if the client auth is >= // VerifyClientCertIfGiven func TestConfigServerTLSClientCASet(t *testing.T) { - tempDir := makeTempDir(t) - defer os.RemoveAll(tempDir) - key, cert := generateCertAndKey(t, tempDir) - ca := generateMultiCert(t, tempDir) + key, cert := getCertAndKey() + ca := getMultiCert() tlsConfig, err := Server(Options{ CertFile: cert, @@ -240,17 +189,166 @@ func TestConfigServerTLSClientCASet(t *testing.T) { if tlsConfig.ClientAuth != tls.VerifyClientCertIfGiven { t.Fatal("ClientAuth was not set to what was in the options") } - if tlsConfig.ClientCAs == nil || len(tlsConfig.ClientCAs.Subjects()) != 2 { + basePool, err := SystemCertPool() + if err != nil { + basePool = x509.NewCertPool() + } + // because we are not enabling `ExclusiveRootPools`, any root pool will also contain the system roots + if tlsConfig.ClientCAs == nil || len(tlsConfig.ClientCAs.Subjects()) != len(basePool.Subjects())+2 { t.Fatalf("Client CAs were never set correctly") } } +// Exclusive root pools determines whether the CA pool will be a union of the system +// certificate pool and custom certs, or an exclusive or of the custom certs and system pool +func TestConfigServerExclusiveRootPools(t *testing.T) { + key, cert := getCertAndKey() + ca := getMultiCert() + + caBytes, err := ioutil.ReadFile(ca) + if err != nil { + t.Fatal("Unable to read CA certs", err) + } + + var testCerts []*x509.Certificate + for _, pemBytes := range [][]byte{caBytes, []byte(systemRootTrustedCert)} { + pemBlock, _ := pem.Decode(pemBytes) + if pemBlock == nil { + t.Fatal("Malformed certificate") + } + cert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + t.Fatal("Unable to parse certificate") + } + testCerts = append(testCerts, cert) + } + + // ExclusiveRootPools not set, so should be able to verify both system-signed certs + // and custom CA-signed certs + tlsConfig, err := Server(Options{ + CertFile: cert, + KeyFile: key, + ClientAuth: tls.VerifyClientCertIfGiven, + CAFile: ca, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure server TLS", err) + } + + for i, cert 
:= range testCerts { + if _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.ClientCAs}); err != nil { + t.Fatalf("Unable to verify certificate %d: %v", i, err) + } + } + + // ExclusiveRootPools set and custom CA provided, so system certs should not be verifiable + // and custom CA-signed certs should be verifiable + tlsConfig, err = Server(Options{ + CertFile: cert, + KeyFile: key, + ClientAuth: tls.VerifyClientCertIfGiven, + CAFile: ca, + ExclusiveRootPools: true, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure server TLS", err) + } + + for i, cert := range testCerts { + _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.ClientCAs}) + switch { + case i == 0 && err != nil: + t.Fatal("Unable to verify custom certificate, even though the root pool should have only the custom CA", err) + case i == 1 && err == nil: + t.Fatal("Successfully verified system root-signed certificate though the root pool should have only the cusotm CA", err) + } + } + + // No CA file provided, system cert should be verifiable only + tlsConfig, err = Server(Options{ + CertFile: cert, + KeyFile: key, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure server TLS", err) + } + + for i, cert := range testCerts { + _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.ClientCAs}) + switch { + case i == 1 && err != nil: + t.Fatal("Unable to verify system root-signed certificate, even though the root pool should be the system pool only", err) + case i == 0 && err == nil: + t.Fatal("Successfully verified custom certificate though the root pool should be the system pool only", err) + } + } +} + +// If a valid minimum version is specified in the options, the server's +// minimum version should be set accordingly +func TestConfigServerTLSMinVersionIsSetBasedOnOptions(t *testing.T) { + versions := []uint16{ + tls.VersionTLS11, + tls.VersionTLS12, + } + key, cert := getCertAndKey() + + for _, v := range versions { + tlsConfig, err := Server(Options{ + MinVersion: v, + CertFile: cert, + KeyFile: key, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure server TLS", err) + } + + if tlsConfig.MinVersion != v { + t.Fatal("Unexpected minimum TLS version: ", tlsConfig.MinVersion) + } + } +} + +// An error should be returned if the specified minimum version for the server +// is too low, i.e. 
less than VersionTLS10 +func TestConfigServerTLSMinVersionNotSetIfMinVersionIsTooLow(t *testing.T) { + key, cert := getCertAndKey() + + _, err := Server(Options{ + MinVersion: tls.VersionSSL30, + CertFile: cert, + KeyFile: key, + }) + + if err == nil { + t.Fatal("Should have returned an error for minimum version below TLS10") + } +} + +// An error should be returned if an invalid minimum version for the server is +// in the options struct +func TestConfigServerTLSMinVersionNotSetIfMinVersionIsInvalid(t *testing.T) { + key, cert := getCertAndKey() + + _, err := Server(Options{ + MinVersion: 1, + CertFile: cert, + KeyFile: key, + }) + + if err == nil { + t.Fatal("Should have returned error on invalid minimum version option") + } +} + // The root CA is never set if InsecureSkipBoolean is set to true, but the // default client options are set func TestConfigClientTLSNoVerify(t *testing.T) { - tempDir := makeTempDir(t) - defer os.RemoveAll(tempDir) - ca := generateMultiCert(t, tempDir) + ca := getMultiCert() tlsConfig, err := Client(Options{CAFile: ca, InsecureSkipVerify: true}) @@ -301,17 +399,19 @@ func TestConfigClientTLSNoRoot(t *testing.T) { // The RootCA is set if the file is provided and InsecureSkipVerify is false func TestConfigClientTLSRootCAFileWithOneCert(t *testing.T) { - tempDir := makeTempDir(t) - defer os.RemoveAll(tempDir) - ca := generateMultiCert(t, tempDir) + ca := getMultiCert() tlsConfig, err := Client(Options{CAFile: ca}) if err != nil || tlsConfig == nil { t.Fatal("Unable to configure client TLS", err) } - - if tlsConfig.RootCAs == nil || len(tlsConfig.RootCAs.Subjects()) != 2 { + basePool, err := SystemCertPool() + if err != nil { + basePool = x509.NewCertPool() + } + // because we are not enabling `ExclusiveRootPools`, any root pool will also contain the system roots + if tlsConfig.RootCAs == nil || len(tlsConfig.RootCAs.Subjects()) != len(basePool.Subjects())+2 { t.Fatal("Root CAs not set properly", err) } if tlsConfig.Certificates != nil { @@ -331,9 +431,7 @@ func TestConfigClientTLSNonexistentRootCAFile(t *testing.T) { // An error is returned if either the client cert or the key are provided // but invalid or blank. func TestConfigClientTLSClientCertOrKeyInvalid(t *testing.T) { - tempDir := makeTempDir(t) - defer os.RemoveAll(tempDir) - key, cert := generateCertAndKey(t, tempDir) + key, cert := getCertAndKey() tempFile, err := ioutil.TempFile("", "cert-test") if err != nil { @@ -358,9 +456,7 @@ func TestConfigClientTLSClientCertOrKeyInvalid(t *testing.T) { // The certificate is set if the client cert and client key are provided and // valid. 
func TestConfigClientTLSValidClientCertAndKey(t *testing.T) { - tempDir := makeTempDir(t) - defer os.RemoveAll(tempDir) - key, cert := generateCertAndKey(t, tempDir) + key, cert := getCertAndKey() keypair, err := tls.LoadX509KeyPair(cert, key) if err != nil { @@ -389,3 +485,167 @@ func TestConfigClientTLSValidClientCertAndKey(t *testing.T) { t.Fatal("Root CAs should not have been set", err) } } + +// The certificate is set if the client cert and encrypted client key are +// provided and valid and passphrase can decrypt the key +func TestConfigClientTLSValidClientCertAndEncryptedKey(t *testing.T) { + key, cert := getCertAndEncryptedKey() + + tlsConfig, err := Client(Options{ + CertFile: cert, + KeyFile: key, + Passphrase: "FooBar123", + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + if len(tlsConfig.Certificates) != 1 { + t.Fatal("Unexpected client certificates") + } +} + +// The certificate is not set if the provided passphrase cannot decrypt +// the encrypted key. +func TestConfigClientTLSNotSetWithInvalidPassphrase(t *testing.T) { + key, cert := getCertAndEncryptedKey() + + tlsConfig, err := Client(Options{ + CertFile: cert, + KeyFile: key, + Passphrase: "InvalidPassphrase", + }) + + if !IsErrEncryptedKey(err) || tlsConfig != nil { + t.Fatal("Expected failure due to incorrect passphrase.") + } +} + +// Exclusive root pools determines whether the CA pool will be a union of the system +// certificate pool and custom certs, or an exclusive or of the custom certs and system pool +func TestConfigClientExclusiveRootPools(t *testing.T) { + ca := getMultiCert() + + caBytes, err := ioutil.ReadFile(ca) + if err != nil { + t.Fatal("Unable to read CA certs", err) + } + + var testCerts []*x509.Certificate + for _, pemBytes := range [][]byte{caBytes, []byte(systemRootTrustedCert)} { + pemBlock, _ := pem.Decode(pemBytes) + if pemBlock == nil { + t.Fatal("Malformed certificate") + } + cert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + t.Fatal("Unable to parse certificate") + } + testCerts = append(testCerts, cert) + } + + // ExclusiveRootPools not set, so should be able to verify both system-signed certs + // and custom CA-signed certs + tlsConfig, err := Client(Options{CAFile: ca}) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + for i, cert := range testCerts { + if _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.RootCAs}); err != nil { + t.Fatalf("Unable to verify certificate %d: %v", i, err) + } + } + + // ExclusiveRootPools set and custom CA provided, so system certs should not be verifiable + // and custom CA-signed certs should be verifiable + tlsConfig, err = Client(Options{ + CAFile: ca, + ExclusiveRootPools: true, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + for i, cert := range testCerts { + _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.RootCAs}) + switch { + case i == 0 && err != nil: + t.Fatal("Unable to verify custom certificate, even though the root pool should have only the custom CA", err) + case i == 1 && err == nil: + t.Fatal("Successfully verified system root-signed certificate though the root pool should have only the cusotm CA", err) + } + } + + // No CA file provided, system cert should be verifiable only + tlsConfig, err = Client(Options{}) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + for i, cert := range testCerts { + _, err := 
cert.Verify(x509.VerifyOptions{Roots: tlsConfig.RootCAs}) + switch { + case i == 1 && err != nil: + t.Fatal("Unable to verify system root-signed certificate, even though the root pool should be the system pool only", err) + case i == 0 && err == nil: + t.Fatal("Successfully verified custom certificate though the root pool should be the system pool only", err) + } + } +} + +// If a valid MinVersion is specified in the options, the client's +// minimum version should be set accordingly +func TestConfigClientTLSMinVersionIsSetBasedOnOptions(t *testing.T) { + key, cert := getCertAndKey() + + tlsConfig, err := Client(Options{ + MinVersion: tls.VersionTLS12, + CertFile: cert, + KeyFile: key, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + if tlsConfig.MinVersion != tls.VersionTLS12 { + t.Fatal("Unexpected minimum TLS version: ", tlsConfig.MinVersion) + } +} + +// An error should be returned if the specified minimum version for the client +// is too low, i.e. less than VersionTLS12 +func TestConfigClientTLSMinVersionNotSetIfMinVersionIsTooLow(t *testing.T) { + key, cert := getCertAndKey() + + _, err := Client(Options{ + MinVersion: tls.VersionTLS11, + CertFile: cert, + KeyFile: key, + }) + + if err == nil { + t.Fatal("Should have returned an error for minimum version below TLS12") + } +} + +// An error should be returned if an invalid minimum version for the client is +// in the options struct +func TestConfigClientTLSMinVersionNotSetIfMinVersionIsInvalid(t *testing.T) { + key, cert := getCertAndKey() + + _, err := Client(Options{ + MinVersion: 1, + CertFile: cert, + KeyFile: key, + }) + + if err == nil { + t.Fatal("Should have returned error on invalid minimum version option") + } +} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go index b6485edffcf..44616c27187 100644 --- a/vendor/github.com/docker/go-units/size.go +++ b/vendor/github.com/docker/go-units/size.go @@ -58,7 +58,7 @@ func CustomSize(format string, size float64, base float64, _map []string) string // instead of 4 digit precision used in units.HumanSize. func HumanSizeWithPrecision(size float64, precision int) string { size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) - return fmt.Sprintf("%.*g %s", precision, size, unit) + return fmt.Sprintf("%.*g%s", precision, size, unit) } // HumanSize returns a human-readable approximation of a size @@ -70,7 +70,7 @@ func HumanSize(size float64) string { // BytesSize returns a human-readable size in bytes, kibibytes, // mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). 
func BytesSize(size float64) string { - return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs) + return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) } // FromHumanSize returns an integer from a human-readable specification of a diff --git a/vendor/github.com/docker/go-units/size_test.go b/vendor/github.com/docker/go-units/size_test.go index 7b75a0b758c..8923e507612 100644 --- a/vendor/github.com/docker/go-units/size_test.go +++ b/vendor/github.com/docker/go-units/size_test.go @@ -60,26 +60,26 @@ func ExampleRAMInBytes() { } func TestBytesSize(t *testing.T) { - assertEquals(t, "1 KiB", BytesSize(1024)) - assertEquals(t, "1 MiB", BytesSize(1024*1024)) - assertEquals(t, "1 MiB", BytesSize(1048576)) - assertEquals(t, "2 MiB", BytesSize(2*MiB)) - assertEquals(t, "3.42 GiB", BytesSize(3.42*GiB)) - assertEquals(t, "5.372 TiB", BytesSize(5.372*TiB)) - assertEquals(t, "2.22 PiB", BytesSize(2.22*PiB)) - assertEquals(t, "1.049e+06 YiB", BytesSize(KiB*KiB*KiB*KiB*KiB*PiB)) + assertEquals(t, "1KiB", BytesSize(1024)) + assertEquals(t, "1MiB", BytesSize(1024*1024)) + assertEquals(t, "1MiB", BytesSize(1048576)) + assertEquals(t, "2MiB", BytesSize(2*MiB)) + assertEquals(t, "3.42GiB", BytesSize(3.42*GiB)) + assertEquals(t, "5.372TiB", BytesSize(5.372*TiB)) + assertEquals(t, "2.22PiB", BytesSize(2.22*PiB)) + assertEquals(t, "1.049e+06YiB", BytesSize(KiB*KiB*KiB*KiB*KiB*PiB)) } func TestHumanSize(t *testing.T) { - assertEquals(t, "1 kB", HumanSize(1000)) - assertEquals(t, "1.024 kB", HumanSize(1024)) - assertEquals(t, "1 MB", HumanSize(1000000)) - assertEquals(t, "1.049 MB", HumanSize(1048576)) - assertEquals(t, "2 MB", HumanSize(2*MB)) - assertEquals(t, "3.42 GB", HumanSize(float64(3.42*GB))) - assertEquals(t, "5.372 TB", HumanSize(float64(5.372*TB))) - assertEquals(t, "2.22 PB", HumanSize(float64(2.22*PB))) - assertEquals(t, "1e+04 YB", HumanSize(float64(10000000000000*PB))) + assertEquals(t, "1kB", HumanSize(1000)) + assertEquals(t, "1.024kB", HumanSize(1024)) + assertEquals(t, "1MB", HumanSize(1000000)) + assertEquals(t, "1.049MB", HumanSize(1048576)) + assertEquals(t, "2MB", HumanSize(2*MB)) + assertEquals(t, "3.42GB", HumanSize(float64(3.42*GB))) + assertEquals(t, "5.372TB", HumanSize(float64(5.372*TB))) + assertEquals(t, "2.22PB", HumanSize(float64(2.22*PB))) + assertEquals(t, "1e+04YB", HumanSize(float64(10000000000000*PB))) } func TestFromHumanSize(t *testing.T) { diff --git a/vendor/github.com/exponent-io/jsonpath/.gitignore b/vendor/github.com/exponent-io/jsonpath/.gitignore deleted file mode 100644 index daf913b1b34..00000000000 --- a/vendor/github.com/exponent-io/jsonpath/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/exponent-io/jsonpath/.travis.yml b/vendor/github.com/exponent-io/jsonpath/.travis.yml deleted file mode 100644 index f4f458a416d..00000000000 --- a/vendor/github.com/exponent-io/jsonpath/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go - -go: - - 1.5 - - tip diff --git a/vendor/github.com/exponent-io/jsonpath/README.md b/vendor/github.com/exponent-io/jsonpath/README.md deleted file mode 100644 index 382fb3138cb..00000000000 --- a/vendor/github.com/exponent-io/jsonpath/README.md +++ /dev/null @@ -1,66 +0,0 @@ 
-[![GoDoc](https://godoc.org/github.com/exponent-io/jsonpath?status.svg)](https://godoc.org/github.com/exponent-io/jsonpath) -[![Build Status](https://travis-ci.org/exponent-io/jsonpath.svg?branch=master)](https://travis-ci.org/exponent-io/jsonpath) - -# jsonpath - -This package extends the [json.Decoder](https://golang.org/pkg/encoding/json/#Decoder) to support navigating a stream of JSON tokens. You should be able to use this extended Decoder places where a json.Decoder would have been used. - -This Decoder has the following enhancements... - * The [Scan](https://godoc.org/github.com/exponent-io/jsonpath/#Decoder.Scan) method supports scanning a JSON stream while extracting particular values along the way using [PathActions](https://godoc.org/github.com/exponent-io/jsonpath#PathActions). - * The [SeekTo](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.SeekTo) method supports seeking forward in a JSON token stream to a particular path. - * The [Path](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Path) method returns the path of the most recently parsed token. - * The [Token](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Token) method has been modified to distinguish between strings that are object keys and strings that are values. Object key strings are returned as the [KeyString](https://godoc.org/github.com/exponent-io/jsonpath#KeyString) type rather than a native string. - -## Installation - - go get -u github.com/exponent-io/jsonpath - -## Example Usage - -#### SeekTo - -```go -import "github.com/exponent-io/jsonpath" - -var j = []byte(`[ - {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}}, - {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}} -]`) - -w := json.NewDecoder(bytes.NewReader(j)) -var v interface{} - -w.SeekTo(1, "Point", "G") -w.Decode(&v) // v is 218 -``` - -#### Scan with PathActions - -```go -var j = []byte(`{"colors":[ - {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10, "A": 58}}, - {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255, "A": 231}} -]}`) - -var actions PathActions - -// Extract the value at Point.A -actions.Add(func(d *Decoder) error { - var alpha int - err := d.Decode(&alpha) - fmt.Printf("Alpha: %v\n", alpha) - return err -}, "Point", "A") - -w := NewDecoder(bytes.NewReader(j)) -w.SeekTo("colors", 0) - -var ok = true -var err error -for ok { - ok, err = w.Scan(&actions) - if err != nil && err != io.EOF { - panic(err) - } -} -``` diff --git a/vendor/github.com/exponent-io/jsonpath/decoder.go b/vendor/github.com/exponent-io/jsonpath/decoder.go deleted file mode 100644 index 31de46c7381..00000000000 --- a/vendor/github.com/exponent-io/jsonpath/decoder.go +++ /dev/null @@ -1,210 +0,0 @@ -package jsonpath - -import ( - "encoding/json" - "io" -) - -// KeyString is returned from Decoder.Token to represent each key in a JSON object value. -type KeyString string - -// Decoder extends the Go runtime's encoding/json.Decoder to support navigating in a stream of JSON tokens. -type Decoder struct { - json.Decoder - - path JsonPath - context jsonContext -} - -// NewDecoder creates a new instance of the extended JSON Decoder. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{Decoder: *json.NewDecoder(r)} -} - -// SeekTo causes the Decoder to move forward to a given path in the JSON structure. -// -// The path argument must consist of strings or integers. Each string specifies an JSON object key, and -// each integer specifies an index into a JSON array. 
-// -// Consider the JSON structure -// -// { "a": [0,"s",12e4,{"b":0,"v":35} ] } -// -// SeekTo("a",3,"v") will move to the value referenced by the "a" key in the current object, -// followed by a move to the 4th value (index 3) in the array, followed by a move to the value at key "v". -// In this example, a subsequent call to the decoder's Decode() would unmarshal the value 35. -// -// SeekTo returns a boolean value indicating whether a match was found. -// -// Decoder is intended to be used with a stream of tokens. As a result it navigates forward only. -func (d *Decoder) SeekTo(path ...interface{}) (bool, error) { - - if len(path) == 0 { - return len(d.path) == 0, nil - } - last := len(path) - 1 - if i, ok := path[last].(int); ok { - path[last] = i - 1 - } - - for { - if d.path.Equal(path) { - return true, nil - } - _, err := d.Token() - if err == io.EOF { - return false, nil - } else if err != nil { - return false, err - } - } -} - -// Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by v. This is -// equivalent to encoding/json.Decode(). -func (d *Decoder) Decode(v interface{}) error { - switch d.context { - case objValue: - d.context = objKey - break - case arrValue: - d.path.incTop() - break - } - return d.Decoder.Decode(v) -} - -// Path returns a slice of string and/or int values representing the path from the root of the JSON object to the -// position of the most-recently parsed token. -func (d *Decoder) Path() JsonPath { - p := make(JsonPath, len(d.path)) - copy(p, d.path) - return p -} - -// Token is equivalent to the Token() method on json.Decoder. The primary difference is that it distinguishes -// between strings that are keys and and strings that are values. String tokens that are object keys are returned as a -// KeyString rather than as a native string. -func (d *Decoder) Token() (json.Token, error) { - t, err := d.Decoder.Token() - if err != nil { - return t, err - } - - if t == nil { - switch d.context { - case objValue: - d.context = objKey - break - case arrValue: - d.path.incTop() - break - } - return t, err - } - - switch t := t.(type) { - case json.Delim: - switch t { - case json.Delim('{'): - if d.context == arrValue { - d.path.incTop() - } - d.path.push("") - d.context = objKey - break - case json.Delim('}'): - d.path.pop() - d.context = d.path.inferContext() - break - case json.Delim('['): - if d.context == arrValue { - d.path.incTop() - } - d.path.push(-1) - d.context = arrValue - break - case json.Delim(']'): - d.path.pop() - d.context = d.path.inferContext() - break - } - case float64, json.Number, bool: - switch d.context { - case objValue: - d.context = objKey - break - case arrValue: - d.path.incTop() - break - } - break - case string: - switch d.context { - case objKey: - d.path.nameTop(t) - d.context = objValue - return KeyString(t), err - case objValue: - d.context = objKey - case arrValue: - d.path.incTop() - } - break - } - - return t, err -} - -// Scan moves forward over the JSON stream consuming all the tokens at the current level (current object, current array) -// invoking each matching PathAction along the way. -// -// Scan returns true if there are more contiguous values to scan (for example in an array). -func (d *Decoder) Scan(ext *PathActions) (bool, error) { - - rootPath := d.Path() - - // If this is an array path, increment the root path in our local copy. 
- if rootPath.inferContext() == arrValue { - rootPath.incTop() - } - - for { - // advance the token position - _, err := d.Token() - if err != nil { - return false, err - } - - match: - var relPath JsonPath - - // capture the new JSON path - path := d.Path() - - if len(path) > len(rootPath) { - // capture the path relative to where the scan started - relPath = path[len(rootPath):] - } else { - // if the path is not longer than the root, then we are done with this scan - // return boolean flag indicating if there are more items to scan at the same level - return d.Decoder.More(), nil - } - - // match the relative path against the path actions - if node := ext.node.match(relPath); node != nil { - if node.action != nil { - // we have a match so execute the action - err = node.action(d) - if err != nil { - return d.Decoder.More(), err - } - // The action may have advanced the decoder. If we are in an array, advancing it further would - // skip tokens. So, if we are scanning an array, jump to the top without advancing the token. - if d.path.inferContext() == arrValue && d.Decoder.More() { - goto match - } - } - } - } -} diff --git a/vendor/github.com/exponent-io/jsonpath/decoder_test.go b/vendor/github.com/exponent-io/jsonpath/decoder_test.go deleted file mode 100644 index 16b91c600da..00000000000 --- a/vendor/github.com/exponent-io/jsonpath/decoder_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package jsonpath - -import ( - "bytes" - "fmt" - "reflect" - "testing" -) - -var decoderSkipTests = []struct { - in string - path []interface{} - match bool - err error - out interface{} -}{ - {in: `{}`, path: []interface{}{"a", "b2"}, match: false, err: nil}, - {in: `{"a":{"b":{"c":14}}}`, path: []interface{}{"a"}, match: true, out: map[string]interface{}{"b": map[string]interface{}{"c": float64(14)}}, err: nil}, - {in: `{"a":{"b":{"c":14}}}`, path: []interface{}{"a", "b"}, match: true, out: map[string]interface{}{"c": float64(14)}, err: nil}, - {in: `{"a":{"b":{"c":14}}}`, path: []interface{}{"a", "b", "c"}, match: true, out: float64(14), err: nil}, - {in: `{"a":{"d":{"b":{"c":3}}, "b":{"c":14}}}`, path: []interface{}{"a", "b", "c"}, match: true, out: float64(14), err: nil}, - {in: `{"a":{"b":{"c":14},"b2":3}}`, path: []interface{}{"a", "b2"}, match: true, out: float64(3), err: nil}, - - {in: `[]`, path: []interface{}{2}, match: false, err: nil}, - {in: `[0,1]`, path: []interface{}{0}, match: true, out: float64(0), err: nil}, - {in: `[0,1,2]`, path: []interface{}{2}, match: true, out: float64(2), err: nil}, - {in: `[0,1 , 2]`, path: []interface{}{2}, match: true, out: float64(2), err: nil}, - {in: `[0,{"b":1},2]`, path: []interface{}{1}, match: true, out: map[string]interface{}{"b": float64(1)}, err: nil}, - {in: `[1,{"b":1},2]`, path: []interface{}{2}, match: true, out: float64(2), err: nil}, - {in: `[1,{"b":[1]},2]`, path: []interface{}{2}, match: true, out: float64(2), err: nil}, - {in: `[1,[{"b":[1]},3],2]`, path: []interface{}{2}, match: true, out: float64(2), err: nil}, - - {in: `[1,[{"b":[1]},3],4]`, path: []interface{}{1, 1}, match: true, out: float64(3), err: nil}, - {in: `[1,[{"b":[1]},3],2]`, path: []interface{}{1, 0, "b", 0}, match: true, out: float64(1), err: nil}, - {in: `[1,[{"b":[1]},3],5]`, path: []interface{}{2}, match: true, out: float64(5), err: nil}, - {in: `{"b":[{"a":0},{"a":1}]}`, path: []interface{}{"b", 0, "a"}, match: true, out: float64(0), err: nil}, - {in: `{"b":[{"a":0},{"a":1}]}`, path: []interface{}{"b", 1, "a"}, match: true, out: float64(1), err: nil}, - {in: 
`{"a":"b","b":"z","z":"s"}`, path: []interface{}{"b"}, match: true, out: "z", err: nil}, - {in: `{"a":"b","b":"z","l":0,"z":"s"}`, path: []interface{}{"z"}, match: true, out: "s", err: nil}, -} - -func TestDecoderSeekTo(t *testing.T) { - var testDesc string - var v interface{} - var match bool - var err error - - for ti, tst := range decoderSkipTests { - - w := NewDecoder(bytes.NewBuffer([]byte(tst.in))) - testDesc = fmt.Sprintf("#%v '%s'", ti, tst.in) - - match, err = w.SeekTo(tst.path...) - if match != tst.match { - t.Errorf("#%v expected match=%v was match=%v : %v", ti, tst.match, match, testDesc) - } - if !reflect.DeepEqual(err, tst.err) { - t.Errorf("#%v unexpected error: '%v' expecting '%v' : %v", ti, err, tst.err, testDesc) - } - - if match { - if err = w.Decode(&v); err != nil { - t.Errorf("#%v decode: error: '%v' : %v", ti, err, testDesc) - } - if !reflect.DeepEqual(v, tst.out) { - t.Errorf("#%v decode: expected %#v, was %#v : %v", ti, tst.out, v, testDesc) - } - } - } -} - -func TestDecoderMoveMultiple(t *testing.T) { - - j := []byte(` - { - "foo": 1, - "bar": 2, - "test": "Hello, world!", - "baz": 123.1, - "array": [ - {"foo": 1}, - {"bar": 2}, - {"baz": 3} - ], - "subobj": { - "foo": 1, - "subarray": [1,2,3], - "subsubobj": { - "bar": 2, - "baz": 3, - "array": ["hello", "world"] - } - }, - "bool": true - }`) - - tests := [][]struct { - path []interface{} - out interface{} - match bool - }{ - { // Every Element - {path: []interface{}{"foo"}, match: true, out: float64(1)}, - {path: []interface{}{"bar"}, match: true, out: float64(2)}, - {path: []interface{}{"test"}, match: true, out: "Hello, world!"}, - {path: []interface{}{"baz"}, match: true, out: float64(123.1)}, - {path: []interface{}{"array", 0, "foo"}, match: true, out: float64(1)}, - {path: []interface{}{"array", 1, "bar"}, match: true, out: float64(2)}, - {path: []interface{}{"array", 2, "baz"}, match: true, out: float64(3)}, - {path: []interface{}{"subobj", "foo"}, match: true, out: float64(1)}, - {path: []interface{}{"subobj", "subarray", 0}, match: true, out: float64(1)}, - {path: []interface{}{"subobj", "subarray", 1}, match: true, out: float64(2)}, - {path: []interface{}{"subobj", "subarray", 2}, match: true, out: float64(3)}, - {path: []interface{}{"subobj", "subsubobj", "bar"}, match: true, out: float64(2)}, - {path: []interface{}{"subobj", "subsubobj", "baz"}, match: true, out: float64(3)}, - {path: []interface{}{"subobj", "subsubobj", "array", 0}, match: true, out: "hello"}, - {path: []interface{}{"subobj", "subsubobj", "array", 1}, match: true, out: "world"}, - {path: []interface{}{"bool"}, match: true, out: true}, - }, - { // Deep, then shallow - {path: []interface{}{"subobj", "subsubobj", "array", 0}, match: true, out: "hello"}, - {path: []interface{}{"bool"}, match: true, out: true}, - }, - { // Complex, then shallow - {path: []interface{}{"array"}, match: true, out: []interface{}{map[string]interface{}{"foo": float64(1)}, map[string]interface{}{"bar": float64(2)}, map[string]interface{}{"baz": float64(3)}}}, - {path: []interface{}{"subobj", "subsubobj"}, match: true, out: map[string]interface{}{"bar": float64(2), "baz": float64(3), "array": []interface{}{"hello", "world"}}}, - {path: []interface{}{"bool"}, match: true, out: true}, - }, - { - {path: []interface{}{"foo"}, match: true, out: float64(1)}, - {path: []interface{}{"test"}, match: true, out: "Hello, world!"}, - {path: []interface{}{"array", 1, "bar"}, match: true, out: float64(2)}, - {path: []interface{}{"subobj", "subarray", 1}, match: true, out: 
float64(2)}, - {path: []interface{}{"subobj", "subarray", 2}, match: true, out: float64(3)}, - {path: []interface{}{"subobj", "subsubobj", "array", 1}, match: true, out: "world"}, - }, - } - - for _, tst := range tests { - - w := NewDecoder(bytes.NewBuffer(j)) - - var err error - var v interface{} - var m bool - - for i, step := range tst { - - m, err = w.SeekTo(step.path...) - if m != step.match { - t.Errorf("@%v expected match=%v, but was match=%v", i, step.match, m) - } - if err != nil { - t.Errorf("unexpected error: %v", err) - } - err = w.Decode(&v) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if !reflect.DeepEqual(v, step.out) { - t.Errorf("expected %v but was %v", step.out, v) - } - } - } - -} diff --git a/vendor/github.com/exponent-io/jsonpath/example_test.go b/vendor/github.com/exponent-io/jsonpath/example_test.go deleted file mode 100644 index 8253bfe16c5..00000000000 --- a/vendor/github.com/exponent-io/jsonpath/example_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package jsonpath - -import ( - "bytes" - "fmt" - "io" -) - -func ExampleDecoder_SeekTo() { - - var j = []byte(`[ - {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}}, - {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}} - ]`) - - w := NewDecoder(bytes.NewReader(j)) - var v interface{} - - w.SeekTo(0, "Space") - w.Decode(&v) - fmt.Println(0, "Space", v) - - w.SeekTo(0, "Point", "Cr") - w.Decode(&v) - fmt.Println(0, "Point", "Cr", v) - - w.SeekTo(1, "Point", "G") - w.Decode(&v) - fmt.Println(1, "Point", "G", v) - - // Output: - // 0 Space YCbCr - // 0 Point Cr -10 - // 1 Point G 218 -} - -func ExampleDecoder_Scan() { - - var j = []byte(`{"colors":[ - {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10, "A": 58}}, - {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255, "A": 231}} - ]}`) - - var actions PathActions - - // Extract the value at Point.A - actions.Add(func(d *Decoder) error { - var alpha int - err := d.Decode(&alpha) - fmt.Printf("Alpha: %v\n", alpha) - return err - }, "Point", "A") - - w := NewDecoder(bytes.NewReader(j)) - w.SeekTo("colors", 0) - - var ok = true - var err error - for ok { - ok, err = w.Scan(&actions) - if err != nil && err != io.EOF { - panic(err) - } - } - - // Output: - // Alpha: 58 - // Alpha: 231 -} - -func ExampleDecoder_Scan_anyIndex() { - - var j = []byte(`{"colors":[ - {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10, "A": 58}}, - {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255, "A": 231}} - ]}`) - - var actions PathActions - - // Extract the value at Point.A - actions.Add(func(d *Decoder) error { - var cr int - err := d.Decode(&cr) - fmt.Printf("Chrominance (Cr): %v\n", cr) - return err - }, "colors", AnyIndex, "Point", "Cr") - - actions.Add(func(d *Decoder) error { - var r int - err := d.Decode(&r) - fmt.Printf("Red: %v\n", r) - return err - }, "colors", AnyIndex, "Point", "R") - - w := NewDecoder(bytes.NewReader(j)) - - var ok = true - var err error - for ok { - ok, err = w.Scan(&actions) - if err != nil && err != io.EOF { - panic(err) - } - } - - // Output: - // Chrominance (Cr): -10 - // Red: 98 -} diff --git a/vendor/github.com/exponent-io/jsonpath/path.go b/vendor/github.com/exponent-io/jsonpath/path.go deleted file mode 100644 index d7db2ad336e..00000000000 --- a/vendor/github.com/exponent-io/jsonpath/path.go +++ /dev/null @@ -1,67 +0,0 @@ -// Extends the Go runtime's json.Decoder enabling navigation of a stream of json tokens. 
-package jsonpath - -import "fmt" - -type jsonContext int - -const ( - none jsonContext = iota - objKey - objValue - arrValue -) - -// AnyIndex can be used in a pattern to match any array index. -const AnyIndex = -2 - -// JsonPath is a slice of strings and/or integers. Each string specifies an JSON object key, and -// each integer specifies an index into a JSON array. -type JsonPath []interface{} - -func (p *JsonPath) push(n interface{}) { *p = append(*p, n) } -func (p *JsonPath) pop() { *p = (*p)[:len(*p)-1] } - -// increment the index at the top of the stack (must be an array index) -func (p *JsonPath) incTop() { (*p)[len(*p)-1] = (*p)[len(*p)-1].(int) + 1 } - -// name the key at the top of the stack (must be an object key) -func (p *JsonPath) nameTop(n string) { (*p)[len(*p)-1] = n } - -// infer the context from the item at the top of the stack -func (p *JsonPath) inferContext() jsonContext { - if len(*p) == 0 { - return none - } - t := (*p)[len(*p)-1] - switch t.(type) { - case string: - return objKey - case int: - return arrValue - default: - panic(fmt.Sprintf("Invalid stack type %T", t)) - } -} - -// Equal tests for equality between two JsonPath types. -func (p *JsonPath) Equal(o JsonPath) bool { - if len(*p) != len(o) { - return false - } - for i, v := range *p { - if v != o[i] { - return false - } - } - return true -} - -func (p *JsonPath) HasPrefix(o JsonPath) bool { - for i, v := range o { - if v != (*p)[i] { - return false - } - } - return true -} diff --git a/vendor/github.com/exponent-io/jsonpath/path_test.go b/vendor/github.com/exponent-io/jsonpath/path_test.go deleted file mode 100644 index e4fe998711a..00000000000 --- a/vendor/github.com/exponent-io/jsonpath/path_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package jsonpath - -import ( - "bytes" - "encoding/json" - "io" - "testing" -) - -func TestTokensAndPaths(t *testing.T) { - j := []byte(` - { - "foo": 1, - "bar": 2, - "test": "Hello, world!", - "baz": 123.1, - "array": [ - {"foo": 1}, - {"bar": 2}, - {"baz": 3} - ], - "subobj": { - "foo": 1, - "subarray": [1,2,3], - "subsubobj": { - "bar": 2, - "baz": 3, - "array": ["hello", "world"] - } - }, - "bool": true, - "a": [[0],[[1]]] - }`) - - expPaths := []JsonPath{ - {""}, - {"foo"}, {"foo"}, - {"bar"}, {"bar"}, - {"test"}, {"test"}, - {"baz"}, {"baz"}, - {"array"}, {"array", -1}, - {"array", 0, ""}, {"array", 0, "foo"}, {"array", 0, "foo"}, {"array", 0}, - {"array", 1, ""}, {"array", 1, "bar"}, {"array", 1, "bar"}, {"array", 1}, - {"array", 2, ""}, {"array", 2, "baz"}, {"array", 2, "baz"}, {"array", 2}, - {"array"}, - {"subobj"}, - {"subobj", ""}, - {"subobj", "foo"}, {"subobj", "foo"}, - {"subobj", "subarray"}, {"subobj", "subarray", -1}, - {"subobj", "subarray", 0}, {"subobj", "subarray", 1}, {"subobj", "subarray", 2}, - {"subobj", "subarray"}, - {"subobj", "subsubobj"}, {"subobj", "subsubobj", ""}, - {"subobj", "subsubobj", "bar"}, {"subobj", "subsubobj", "bar"}, - {"subobj", "subsubobj", "baz"}, {"subobj", "subsubobj", "baz"}, - {"subobj", "subsubobj", "array"}, {"subobj", "subsubobj", "array", -1}, - {"subobj", "subsubobj", "array", 0}, {"subobj", "subsubobj", "array", 1}, - {"subobj", "subsubobj", "array"}, - {"subobj", "subsubobj"}, {"subobj"}, - {"bool"}, {"bool"}, - {"a"}, {"a", -1}, {"a", 0, -1}, {"a", 0, 0}, {"a", 0}, {"a", 1, -1}, - {"a", 1, 0, -1}, {"a", 1, 0, 0}, {"a", 1, 0}, {"a", 1}, {"a"}, - {}, - } - - expTokens := []json.Token{ - json.Delim('{'), - KeyString("foo"), float64(1), - KeyString("bar"), float64(2), - KeyString("test"), "Hello, world!", - 
KeyString("baz"), float64(123.1), - KeyString("array"), json.Delim('['), - json.Delim('{'), KeyString("foo"), float64(1), json.Delim('}'), - json.Delim('{'), KeyString("bar"), float64(2), json.Delim('}'), - json.Delim('{'), KeyString("baz"), float64(3), json.Delim('}'), - json.Delim(']'), - KeyString("subobj"), - json.Delim('{'), - KeyString("foo"), float64(1), - KeyString("subarray"), json.Delim('['), - float64(1), float64(2), float64(3), - json.Delim(']'), - KeyString("subsubobj"), json.Delim('{'), - KeyString("bar"), float64(2), - KeyString("baz"), float64(3), - KeyString("array"), json.Delim('['), - "hello", "world", - json.Delim(']'), - json.Delim('}'), json.Delim('}'), - KeyString("bool"), true, - KeyString("a"), - json.Delim('['), json.Delim('['), float64(0), json.Delim(']'), - json.Delim('['), json.Delim('['), float64(1), json.Delim(']'), json.Delim(']'), - json.Delim(']'), - json.Delim('}'), - } - outTokens := []json.Token{} - outPaths := []JsonPath{} - - d := NewDecoder(bytes.NewBuffer(j)) - - for { - st, err := d.Token() - if err == io.EOF { - break - } else if err != nil { - t.Error(err) - break - } - outTokens = append(outTokens, st) - outPaths = append(outPaths, d.Path()) - } - - // Check tokens - if len(outTokens) != len(expTokens) { - t.Errorf("Out has %v elements, expected %v", len(outTokens), len(expTokens)) - } - for i, v := range expTokens { - if v != outTokens[i] { - t.Errorf("@%v exp: %T:%v but was: %T:%v", i, v, v, outTokens[i], outTokens[i]) - } - } - - // Check paths - if len(outPaths) != len(expPaths) { - t.Errorf("Outpaths has %v elements, expected %v", len(outPaths), len(expPaths)) - } - for i, v := range expPaths { - if !v.Equal(outPaths[i]) { - t.Errorf("@%v exp: %T:%v but was: %T:%v", i, v, v, outPaths[i], outPaths[i]) - } - } -} diff --git a/vendor/github.com/exponent-io/jsonpath/pathaction.go b/vendor/github.com/exponent-io/jsonpath/pathaction.go deleted file mode 100644 index 497ed686ca9..00000000000 --- a/vendor/github.com/exponent-io/jsonpath/pathaction.go +++ /dev/null @@ -1,61 +0,0 @@ -package jsonpath - -// pathNode is used to construct a trie of paths to be matched -type pathNode struct { - matchOn interface{} // string, or integer - childNodes []pathNode - action DecodeAction -} - -// match climbs the trie to find a node that matches the given JSON path. -func (n *pathNode) match(path JsonPath) *pathNode { - var node *pathNode = n - for _, ps := range path { - found := false - for i, n := range node.childNodes { - if n.matchOn == ps { - node = &node.childNodes[i] - found = true - break - } else if _, ok := ps.(int); ok && n.matchOn == AnyIndex { - node = &node.childNodes[i] - found = true - break - } - } - if !found { - return nil - } - } - return node -} - -// PathActions represents a collection of DecodeAction functions that should be called at certain path positions -// when scanning the JSON stream. PathActions can be created once and used many times in one or more JSON streams. -type PathActions struct { - node pathNode -} - -// DecodeAction handlers are called by the Decoder when scanning objects. See PathActions.Add for more detail. -type DecodeAction func(d *Decoder) error - -// Add specifies an action to call on the Decoder when the specified path is encountered. 
-func (je *PathActions) Add(action DecodeAction, path ...interface{}) { - - var node *pathNode = &je.node - for _, ps := range path { - found := false - for i, n := range node.childNodes { - if n.matchOn == ps { - node = &node.childNodes[i] - found = true - break - } - } - if !found { - node.childNodes = append(node.childNodes, pathNode{matchOn: ps}) - node = &node.childNodes[len(node.childNodes)-1] - } - } - node.action = action -} diff --git a/vendor/github.com/exponent-io/jsonpath/pathaction_test.go b/vendor/github.com/exponent-io/jsonpath/pathaction_test.go deleted file mode 100644 index e42fea54195..00000000000 --- a/vendor/github.com/exponent-io/jsonpath/pathaction_test.go +++ /dev/null @@ -1,563 +0,0 @@ -package jsonpath - -import ( - "bytes" - "fmt" - "io" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestPathActionSingleMatch(t *testing.T) { - - j := []byte(` - { - "foo": 1, - "bar": 2, - "test": "Hello, world!", - "baz": 123.1, - "array": [ - {"foo": 1}, - {"bar": 2}, - {"baz": 3} - ], - "subobj": { - "foo": 1, - "subarray": [1,2,3], - "subsubobj": { - "bar": 2, - "baz": 3, - "array": ["hello", "world"] - } - }, - "bool": true - }`) - - decodeCount := 0 - decode := func(d *Decoder) interface{} { - decodeCount++ - var v interface{} - err := d.Decode(&v) - assert.NoError(t, err) - return v - } - - dc := NewDecoder(bytes.NewBuffer(j)) - actions := &PathActions{} - - actions.Add(func(d *Decoder) error { - assert.Equal(t, float64(2), decode(d)) - return nil - }, "array", 1, "bar") - - actions.Add(func(d *Decoder) error { - assert.Equal(t, "Hello, world!", decode(d)) - return nil - }, "test") - - actions.Add(func(d *Decoder) error { - assert.Equal(t, []interface{}{float64(1), float64(2), float64(3)}, decode(d)) - return nil - }, "subobj", "subarray") - - actions.Add(func(d *Decoder) error { - assert.Equal(t, float64(1), decode(d)) - return nil - }, "foo") - - actions.Add(func(d *Decoder) error { - assert.Equal(t, float64(2), decode(d)) - return nil - }, "bar") - - dc.Scan(actions) - - assert.Equal(t, 5, decodeCount) -} - -func TestPathActionAnyIndex(t *testing.T) { - - j := []byte(` - { - "foo": 1, - "bar": 2, - "test": "Hello, world!", - "baz": 123.1, - "array": [ - {"num": 1}, - {"num": 2}, - {"num": 3} - ], - "subobj": { - "foo": 1, - "subarray": [1,2,3], - "subsubobj": { - "bar": 2, - "baz": 3, - "array": ["hello", "world"] - } - }, - "bool": true - }`) - - dc := NewDecoder(bytes.NewBuffer(j)) - actions := &PathActions{} - - numbers := []int{} - actions.Add(func(d *Decoder) (err error) { - var v int - err = d.Decode(&v) - require.NoError(t, err) - numbers = append(numbers, v) - return - }, "array", AnyIndex, "num") - - numbers2 := []int{} - actions.Add(func(d *Decoder) (err error) { - var v int - err = d.Decode(&v) - require.NoError(t, err) - numbers2 = append(numbers2, v) - return - }, "subobj", "subarray", AnyIndex) - - strings := []string{} - actions.Add(func(d *Decoder) (err error) { - var v string - err = d.Decode(&v) - require.NoError(t, err) - strings = append(strings, v) - return - }, "subobj", "subsubobj", "array", AnyIndex) - - dc.Scan(actions) - - assert.Equal(t, []int{1, 2, 3}, numbers) - assert.Equal(t, []int{1, 2, 3}, numbers2) - assert.Equal(t, []string{"hello", "world"}, strings) -} - -func TestPathActionJsonStream(t *testing.T) { - - j := []byte(` - { - "make": "Porsche", - "model": "356 Coupé", - "years": { "from": 1948, "to": 1965} - } - { - "years": { "from": 1964, "to": 1969}, - "make": "Ford", - 
"model": "GT40" - } - { - "make": "Ferrari", - "model": "308 GTB", - "years": { "to": 1985, "from": 1975} - } - `) - - dc := NewDecoder(bytes.NewBuffer(j)) - - var from, to []int - actions := &PathActions{} - actions.Add(func(d *Decoder) (err error) { - var v int - err = d.Decode(&v) - require.NoError(t, err) - from = append(from, v) - return - }, "years", "from") - actions.Add(func(d *Decoder) (err error) { - var v int - err = d.Decode(&v) - require.NoError(t, err) - to = append(to, v) - return - }, "years", "to") - - var err error - var ok = true - for ok && err == nil { - ok, err = dc.Scan(actions) - if err != io.EOF { - require.NoError(t, err) - } - } - - assert.Equal(t, []int{1948, 1964, 1975}, from) - assert.Equal(t, []int{1965, 1969, 1985}, to) -} - -func TestPathActionJsonSubObjects(t *testing.T) { - - j := []byte(` - { - "set": "cars", - "data": [ - { - "make": "Porsche", - "model": "356 Coupé", - "years": { "from": 1948, "to": 1965} - }, - { - "years": { "from": 1964, "to": 1969}, - "make": "Ford", - "model": "GT40" - }, - { - "make": "Ferrari", - "model": "308 GTB", - "years": { "to": 1985, "from": 1975} - } - ], - "more": true - } - `) - - dc := NewDecoder(bytes.NewBuffer(j)) - - var from, to []int - actions := &PathActions{} - actions.Add(func(d *Decoder) (err error) { - var v int - err = d.Decode(&v) - require.NoError(t, err) - from = append(from, v) - return - }, "data", AnyIndex, "years", "from") - actions.Add(func(d *Decoder) (err error) { - var v int - err = d.Decode(&v) - require.NoError(t, err) - to = append(to, v) - return - }, "data", AnyIndex, "years", "to") - - var err error - var ok = true - for ok && err == nil { - _, err = dc.Scan(actions) - if err != io.EOF { - require.NoError(t, err) - } - } - - assert.Equal(t, []int{1948, 1964, 1975}, from) - assert.Equal(t, []int{1965, 1969, 1985}, to) -} - -func TestPathActionSeekThenScan(t *testing.T) { - - j := []byte(` - { - "set": "cars", - "data": [ - { - "make": "Porsche", - "model": "356 Coupé", - "years": { "from": 1948, "to": 1965} - }, - { - "years": { "from": 1964, "to": 1969}, - "make": "Ford", - "model": "GT40" - }, - { - "make": "Ferrari", - "model": "308 GTB", - "years": { "to": 1985, "from": 1975} - } - ], - "more": true - } - `) - - dc := NewDecoder(bytes.NewBuffer(j)) - ok, err := dc.SeekTo("data", 0) - require.NoError(t, err) - require.True(t, ok) - - var from, to int - actions := &PathActions{} - actions.Add(func(d *Decoder) (err error) { - err = d.Decode(&from) - require.NoError(t, err) - return - }, "years", "from") - actions.Add(func(d *Decoder) (err error) { - err = d.Decode(&to) - require.NoError(t, err) - return - }, "years", "to") - - outs := []string{} - for ok && err == nil { - ok, err = dc.Scan(actions) - if err != io.EOF { - require.NoError(t, err) - } - if err == nil || err == io.EOF { - outs = append(outs, fmt.Sprintf("%v-%v", from, to)) - } - } - - assert.Equal(t, []string{"1948-1965", "1964-1969", "1975-1985"}, outs) -} - -func TestPathActionSeekOffsetThenScan(t *testing.T) { - - j := []byte(` - { - "set": "cars", - "data": [ - { - "make": "Porsche", - "model": "356 Coupé", - "years": { "from": 1948, "to": 1965} - }, - { - "years": { "from": 1964, "to": 1969}, - "make": "Ford", - "model": "GT40" - }, - { - "make": "Ferrari", - "model": "308 GTB", - "years": { "to": 1985, "from": 1975} - } - ], - "more": true - } - `) - - dc := NewDecoder(bytes.NewBuffer(j)) - ok, err := dc.SeekTo("data", 1) - require.NoError(t, err) - require.True(t, ok) - - var from, to int - actions := &PathActions{} - 
actions.Add(func(d *Decoder) (err error) { - err = d.Decode(&from) - require.NoError(t, err) - return - }, "years", "from") - actions.Add(func(d *Decoder) (err error) { - err = d.Decode(&to) - require.NoError(t, err) - return - }, "years", "to") - - outs := []string{} - for ok && err == nil { - ok, err = dc.Scan(actions) - if err != io.EOF { - require.NoError(t, err) - } - if err == nil || err == io.EOF { - outs = append(outs, fmt.Sprintf("%v-%v", from, to)) - } - } - - assert.Equal(t, []string{"1964-1969", "1975-1985"}, outs) -} - -func TestPathActionSeekThenScanThenScan(t *testing.T) { - - j := []byte(` - { - "set": "cars", - "data": [ - { - "make": "Porsche", - "model": "356 Coupé", - "years": { "from": 1948, "to": 1965} - }, - { - "years": { "from": 1964, "to": 1969}, - "make": "Ford", - "model": "GT40" - } - ], - "more": [ - { - "make": "Ferrari", - "model": "308 GTB", - "years": { "to": 1985, "from": 1975} - } - ] - } - `) - - dc := NewDecoder(bytes.NewBuffer(j)) - ok, err := dc.SeekTo("data", 0) - require.NoError(t, err) - require.True(t, ok) - - var from, to int - actions := &PathActions{} - actions.Add(func(d *Decoder) (err error) { - err = d.Decode(&from) - require.NoError(t, err) - return - }, "years", "from") - actions.Add(func(d *Decoder) (err error) { - err = d.Decode(&to) - require.NoError(t, err) - return - }, "years", "to") - - outs := []string{} - for ok && err == nil { - ok, err = dc.Scan(actions) - if err != io.EOF { - require.NoError(t, err) - } - if err == nil || err == io.EOF { - outs = append(outs, fmt.Sprintf("%v-%v", from, to)) - } - } - - assert.Equal(t, []string{"1948-1965", "1964-1969"}, outs) - - ok, err = dc.SeekTo("more", 0) - require.NoError(t, err) - require.True(t, ok) - outs = []string{} - for ok && err == nil { - ok, err = dc.Scan(actions) - if err != io.EOF { - require.NoError(t, err) - } - if err == nil || err == io.EOF { - outs = append(outs, fmt.Sprintf("%v-%v", from, to)) - } - } - - assert.Equal(t, []string{"1975-1985"}, outs) -} - -func TestPathActionSeekThenScanHetero(t *testing.T) { - - j := []byte(` - { - "set": "cars", - "data": [ - { - "make": "Porsche", - "model": "356 Coupé", - "years": { "from": 1948, "to": 1965} - }, - ["other","random","stuff"], - { - "years": { "from": 1964, "to": 1969}, - "make": "Ford", - "model": "GT40" - }, - {}, - "str", - { - "make": "Ferrari", - "model": "308 GTB", - "years": { "to": 1985, "from": 1975} - } - ], - "more": true - } - `) - - dc := NewDecoder(bytes.NewBuffer(j)) - ok, err := dc.SeekTo("data", 0) - require.NoError(t, err) - require.True(t, ok) - - var from, to int - actions := &PathActions{} - actions.Add(func(d *Decoder) (err error) { - err = d.Decode(&from) - require.NoError(t, err) - return - }, "years", "from") - actions.Add(func(d *Decoder) (err error) { - err = d.Decode(&to) - require.NoError(t, err) - return - }, "years", "to") - - outs := []string{} - for ok && err == nil { - ok, err = dc.Scan(actions) - if err != io.EOF { - require.NoError(t, err) - } - if (err == nil || err == io.EOF) && (from != 0 && to != 0) { - outs = append(outs, fmt.Sprintf("%v-%v", from, to)) - from, to = 0, 0 - } - } - - assert.Equal(t, []string{"1948-1965", "1964-1969", "1975-1985"}, outs) -} - -func TestPathActionNested(t *testing.T) { - - j := []byte(` - { - "set": "cars", - "data": [ - { - "make": "Porsche", - "model": "356 Coupé", - "years": { "from": 1948, "to": 1965} - }, - { - "years": { "from": 1964, "to": 1969}, - "make": "Ford", - "model": "GT40" - }, - { - "make": "Ferrari", - "model": "308 GTB", - 
"years": { "to": 1985, "from": 1975} - } - ], - "more": true - } - `) - - var from, to int - caractions := &PathActions{} - caractions.Add(func(d *Decoder) (err error) { - err = d.Decode(&from) - require.NoError(t, err) - return - }, "years", "from") - caractions.Add(func(d *Decoder) (err error) { - err = d.Decode(&to) - require.NoError(t, err) - return - }, "years", "to") - - outs := []string{} - - actions := &PathActions{} - actions.Add(func(d *Decoder) error { - - _, err := d.Scan(caractions) - if err != nil { - return err - } - outs = append(outs, fmt.Sprintf("%v-%v", from, to)) - return nil - - }, "data", AnyIndex) - - ok, err := NewDecoder(bytes.NewBuffer(j)).Scan(actions) - assert.NoError(t, err) - assert.False(t, ok) - - assert.Equal(t, []string{"1948-1965", "1964-1969", "1975-1985"}, outs) -} diff --git a/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore index 7adca9439c5..c5203bf6e73 100644 --- a/vendor/github.com/go-ini/ini/.gitignore +++ b/vendor/github.com/go-ini/ini/.gitignore @@ -2,3 +2,4 @@ testdata/conf_out.ini ini.sublime-project ini.sublime-workspace testdata/conf_reflect.ini +.idea diff --git a/vendor/github.com/go-ini/ini/.travis.yml b/vendor/github.com/go-ini/ini/.travis.yml new file mode 100644 index 00000000000..d9d1b8ffadb --- /dev/null +++ b/vendor/github.com/go-ini/ini/.travis.yml @@ -0,0 +1,13 @@ +sudo: false +language: go +go: + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - master + +script: + - go get golang.org/x/tools/cmd/cover + - go get github.com/smartystreets/goconvey + - go test -v -cover -race diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile new file mode 100644 index 00000000000..ac034e5258f --- /dev/null +++ b/vendor/github.com/go-ini/ini/Makefile @@ -0,0 +1,12 @@ +.PHONY: build test bench vet + +build: vet bench + +test: + go test -v -cover -race + +bench: + go test -v -cover -race -test.bench=. -test.benchmem + +vet: + go vet diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md index 0652fb7339c..e67d51f3204 100644 --- a/vendor/github.com/go-ini/ini/README.md +++ b/vendor/github.com/go-ini/ini/README.md @@ -1,4 +1,4 @@ -ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini) +INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://sourcegraph.com/github.com/go-ini/ini/-/badge.svg)](https://sourcegraph.com/github.com/go-ini/ini?badge) === ![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) @@ -9,7 +9,7 @@ Package ini provides INI file read and write functionality in Go. ## Feature -- Load multiple data sources(`[]byte` or file) with overwrites. +- Load multiple data sources(`[]byte`, file and `io.ReadCloser`) with overwrites. - Read with recursion values. - Read with parent-child sections. - Read with auto-increment key names. @@ -22,16 +22,32 @@ Package ini provides INI file read and write functionality in Go. ## Installation +To use a tagged revision: + go get gopkg.in/ini.v1 +To use with latest changes: + + go get github.com/go-ini/ini + +Please add `-u` flag to update in the future. + +### Testing + +If you want to test on your machine, please apply `-t` flag: + + go get -t gopkg.in/ini.v1 + +Please add `-u` flag to update in the future. 
+ ## Getting Started ### Loading from data sources -A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many as** data sources you want. Passing other types will simply return an error. +A **Data Source** is either raw data in type `[]byte`, a file name with type `string` or `io.ReadCloser`. You can load **as many data sources as you want**. Passing other types will simply return an error. ```go -cfg, err := ini.Load([]byte("raw data"), "filename") +cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) ``` Or start with an empty object: @@ -40,12 +56,78 @@ Or start with an empty object: cfg := ini.Empty() ``` -When you cannot decide how many data sources to load at the beginning, you still able to **Append()** them later. +When you cannot decide how many data sources to load at the beginning, you will still be able to **Append()** them later. ```go err := cfg.Append("other file", []byte("other raw data")) ``` +If you have a list of files with possibilities that some of them may not available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning error. + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +The cool thing is, whenever the file is available to load while you're calling `Reload` method, it will be counted as usual. + +#### Ignore cases of key name + +When you do not care about cases of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing. + +```go +cfg, err := ini.InsensitiveLoad("filename") +//... + +// sec1 and sec2 are the exactly same section object +sec1, err := cfg.GetSection("Section") +sec2, err := cfg.GetSection("SecTIOn") + +// key1 and key2 are the exactly same key object +key1, err := sec1.GetKey("Key") +key2, err := sec2.GetKey("KeY") +``` + +#### MySQL-like boolean key + +MySQL's configuration allows a key without value as follows: + +```ini +[mysqld] +... +skip-host-cache +skip-name-resolve +``` + +By default, this is considered as missing value. But if you know you're going to deal with those cases, you can assign advanced load options: + +```go +cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) +``` + +The value of those keys are always `true`, and when you save to a file, it will keep in the same foramt as you read. + +To generate such keys in your program, you could use `NewBooleanKey`: + +```go +key, err := sec.NewBooleanKey("skip-host-cache") +``` + +#### Comment + +Take care that following format will be treated as comment: + +1. Line begins with `#` or `;` +2. Words after `#` or `;` +3. Words after section name (i.e words after `[some section name]`) + +If you want to save a value with `#` or `;`, please quote them with ``` ` ``` or ``` """ ```. + +Alternatively, you can use following `LoadOptions` to completely ignore inline comments: + +```go +cfg, err := LoadSources(LoadOptions{IgnoreInlineComment: true}, "app.ini")) +``` + ### Working with sections To get a section, you would need to: @@ -63,7 +145,7 @@ section, err := cfg.GetSection("") When you're pretty sure the section exists, following code could make your life easier: ```go -section := cfg.Section("") +section := cfg.Section("section name") ``` What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you. 
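To make the distinction above concrete, here is a minimal sketch against the go-ini API described in this README (the `server` and `client` section names are invented for illustration): `GetSection` reports an error when a section is missing, while `Section` silently creates and returns it.

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	cfg, err := ini.Load([]byte("[server]\nport = 8080\n"))
	if err != nil {
		panic(err)
	}

	// GetSection returns an error for a section that does not exist.
	if _, err := cfg.GetSection("client"); err != nil {
		fmt.Println("no [client] section:", err)
	}

	// Section never fails: a missing section is created on the fly,
	// so the chained lookup falls back to the given default value.
	timeout := cfg.Section("client").Key("timeout").MustInt(30)
	fmt.Println("timeout:", timeout) // 30
}
```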
@@ -117,7 +199,7 @@ names := cfg.Section("").KeyStrings() To get a clone hash of keys and corresponding values: ```go -hash := cfg.GetSection("").KeysHash() +hash := cfg.Section("").KeysHash() ``` ### Working with values @@ -155,8 +237,8 @@ To get value with types: ```go // For boolean values: -// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On -// false when value is: 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off +// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off v, err = cfg.Section("").Key("BOOL").Bool() v, err = cfg.Section("").Key("FLOAT64").Float64() v, err = cfg.Section("").Key("INT").Int() @@ -230,6 +312,16 @@ cfg.Section("advance").Key("two_lines").String() // how about continuation lines cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 ``` +Well, I hate continuation lines, how do I disable that? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +Holy crap! + Note that single quotes around values will be stripped: ```ini @@ -268,9 +360,13 @@ vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), min vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 ``` -To auto-split value into slice: +##### Auto-split values into a slice + +To use zero value of type for invalid inputs: ```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] vals = cfg.Section("").Key("STRINGS").Strings(",") vals = cfg.Section("").Key("FLOAT64S").Float64s(",") vals = cfg.Section("").Key("INTS").Ints(",") @@ -280,6 +376,32 @@ vals = cfg.Section("").Key("UINT64S").Uint64s(",") vals = cfg.Section("").Key("TIMES").Times(",") ``` +To exclude invalid values out of result slice: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +Or to return nothing but error when have invalid inputs: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + ### Save your configuration Finally, it's time to save your configuration to somewhere. 
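As a minimal sketch of saving to disk (the `my.ini` path is only an example, and `SaveTo`/`SaveToIndent` are assumed to be available alongside the writer-based methods shown below):

```go
cfg := ini.Empty()
cfg.Section("server").Key("port").SetValue("8080")

// Persist everything to a file; SaveToIndent additionally indents
// the keys under each section.
if err := cfg.SaveTo("my.ini"); err != nil {
	// handle the error
}
```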
@@ -300,6 +422,12 @@ cfg.WriteTo(writer) cfg.WriteToIndent(writer, "\t") ``` +By default, spaces are used to align "=" sign between key and values, to disable that: + +```go +ini.PrettyFormat = false +``` + ## Advanced Usage ### Recursive Values @@ -341,6 +469,27 @@ CLONE_URL = https://%(IMPORT_PATH)s cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 ``` +#### Retrieve parent keys available to a child section + +```go +cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] +``` + +### Unparseable Sections + +Sometimes, you have sections that do not contain key-value pairs but raw content, to handle such case, you can use `LoadOptions.UnparsableSections`: + +```go +cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] +<1> This slide has the fuel listed in the wrong units `)) + +body := cfg.Section("COMMENTS").Body() + +/* --- start --- +<1> This slide has the fuel listed in the wrong units +------ end --- */ +``` + ### Auto-increment Key Names If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter. @@ -425,8 +574,8 @@ Why not? ```go type Embeded struct { Dates []time.Time `delim:"|"` - Places []string - None []int + Places []string `ini:"places,omitempty"` + None []int `ini:",omitempty"` } type Author struct { @@ -461,8 +610,7 @@ GPA = 2.8 [Embeded] Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 -Places = HangZhou,Boston -None = +places = HangZhou,Boston ``` #### Name Mapper @@ -482,7 +630,7 @@ type Info struct { } func main() { - err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini")) + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) // ... cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) @@ -496,6 +644,26 @@ func main() { Same rules of name mapper apply to `ini.ReflectFromWithMapper` function. +#### Value Mapper + +To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values: + +```go +type Env struct { + Foo string `ini:"foo"` +} + +func main() { + cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n") + cfg.ValueMapper = os.ExpandEnv + // ... + env := &Env{} + err = cfg.Section("env").MapTo(env) +} +``` + +This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`. 
+ #### Other Notes On Map/Reflect Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature: diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md index 27fdb32fee0..0cf4194492c 100644 --- a/vendor/github.com/go-ini/ini/README_ZH.md +++ b/vendor/github.com/go-ini/ini/README_ZH.md @@ -2,7 +2,7 @@ ## 功能特性 -- 支持覆盖加载多个数据源(`[]byte` 或文件) +- 支持覆盖加载多个数据源(`[]byte`、文件和 `io.ReadCloser`) - 支持递归读取键值 - 支持读取父子分区 - 支持读取自增键名 @@ -15,16 +15,32 @@ ## 下载安装 +使用一个特定版本: + go get gopkg.in/ini.v1 +使用最新版: + + go get github.com/go-ini/ini + +如需更新请添加 `-u` 选项。 + +### 测试安装 + +如果您想要在自己的机器上运行测试,请使用 `-t` 标记: + + go get -t gopkg.in/ini.v1 + +如需更新请添加 `-u` 选项。 + ## 开始使用 ### 从数据源加载 -一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 +一个 **数据源** 可以是 `[]byte` 类型的原始数据,`string` 类型的文件路径或 `io.ReadCloser`。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 ```go -cfg, err := ini.Load([]byte("raw data"), "filename") +cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) ``` 或者从一个空白的文件开始: @@ -39,6 +55,72 @@ cfg := ini.Empty() err := cfg.Append("other file", []byte("other raw data")) ``` +当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误): + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。 + +#### 忽略键名的大小写 + +有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写: + +```go +cfg, err := ini.InsensitiveLoad("filename") +//... + +// sec1 和 sec2 指向同一个分区对象 +sec1, err := cfg.GetSection("Section") +sec2, err := cfg.GetSection("SecTIOn") + +// key1 和 key2 指向同一个键对象 +key1, err := sec1.GetKey("Key") +key2, err := sec2.GetKey("KeY") +``` + +#### 类似 MySQL 配置中的布尔值键 + +MySQL 的配置文件中会出现没有具体值的布尔类型的键: + +```ini +[mysqld] +... +skip-host-cache +skip-name-resolve +``` + +默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理: + +```go +cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) +``` + +这些键的值永远为 `true`,且在保存到文件时也只会输出键名。 + +如果您想要通过程序来生成此类键,则可以使用 `NewBooleanKey`: + +```go +key, err := sec.NewBooleanKey("skip-host-cache") +``` + +#### 关于注释 + +下述几种情况的内容将被视为注释: + +1. 所有以 `#` 或 `;` 开头的行 +2. 所有在 `#` 或 `;` 之后的内容 +3. 
分区标签后的文字 (即 `[分区名]` 之后的内容) + +如果你希望使用包含 `#` 或 `;` 的值,请使用 ``` ` ``` 或 ``` """ ``` 进行包覆。 + +除此之外,您还可以通过 `LoadOptions` 完全忽略行内注释: + +```go +cfg, err := LoadSources(LoadOptions{IgnoreInlineComment: true}, "app.ini")) +``` + ### 操作分区(Section) 获取指定分区: @@ -56,7 +138,7 @@ section, err := cfg.GetSection("") 当您非常确定某个分区是存在的,可以使用以下简便方法: ```go -section := cfg.Section("") +section := cfg.Section("section name") ``` 如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 @@ -110,7 +192,7 @@ names := cfg.Section("").KeyStrings() 获取分区下的所有键值对的克隆: ```go -hash := cfg.GetSection("").KeysHash() +hash := cfg.Section("").KeysHash() ``` ### 操作键值(Value) @@ -148,8 +230,8 @@ yes := cfg.Section("").HasValue("test value") ```go // 布尔值的规则: -// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On -// false 当值为:0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off v, err = cfg.Section("").Key("BOOL").Bool() v, err = cfg.Section("").Key("FLOAT64").Float64() v, err = cfg.Section("").Key("INT").Int() @@ -223,6 +305,16 @@ cfg.Section("advance").Key("two_lines").String() // how about continuation lines cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 ``` +可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +哇靠给力啊! + 需要注意的是,值两侧的单引号会被自动剔除: ```ini @@ -261,9 +353,13 @@ vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), min vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 ``` -自动分割键值为切片(slice): +##### 自动分割键值到切片(slice) + +当存在无效输入时,使用零值代替: ```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] vals = cfg.Section("").Key("STRINGS").Strings(",") vals = cfg.Section("").Key("FLOAT64S").Float64s(",") vals = cfg.Section("").Key("INTS").Ints(",") @@ -273,6 +369,32 @@ vals = cfg.Section("").Key("UINT64S").Uint64s(",") vals = cfg.Section("").Key("TIMES").Times(",") ``` +从结果切片中剔除无效输入: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +当存在无效输入时,直接返回错误: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + ### 保存配置 终于到了这个时刻,是时候保存一下配置了。 @@ -293,9 +415,15 @@ cfg.WriteTo(writer) cfg.WriteToIndent(writer, "\t") ``` -### 高级用法 +默认情况下,空格将被用于对齐键值之间的等号以美化输出结果,以下代码可以禁用该功能: + +```go +ini.PrettyFormat = false +``` + +## 高级用法 -#### 递归读取键值 +### 递归读取键值 在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 @@ -315,7 +443,7 @@ cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini ``` -#### 
读取父子分区 +### 读取父子分区 您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 @@ -334,7 +462,28 @@ CLONE_URL = https://%(IMPORT_PATH)s cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 ``` -#### 读取自增键名 +#### 获取上级父分区下的所有键名 + +```go +cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] +``` + +### 无法解析的分区 + +如果遇到一些比较特殊的分区,它们不包含常见的键值对,而是没有固定格式的纯文本,则可以使用 `LoadOptions.UnparsableSections` 进行处理: + +```go +cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] +<1> This slide has the fuel listed in the wrong units `)) + +body := cfg.Section("COMMENTS").Body() + +/* --- start --- +<1> This slide has the fuel listed in the wrong units +------ end --- */ +``` + +### 读取自增键名 如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 @@ -416,8 +565,8 @@ p := &Person{ ```go type Embeded struct { Dates []time.Time `delim:"|"` - Places []string - None []int + Places []string `ini:"places,omitempty"` + None []int `ini:",omitempty"` } type Author struct { @@ -452,8 +601,7 @@ GPA = 2.8 [Embeded] Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 -Places = HangZhou,Boston -None = +places = HangZhou,Boston ``` #### 名称映射器(Name Mapper) @@ -473,7 +621,7 @@ type Info struct{ } func main() { - err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini")) + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) // ... cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) @@ -487,6 +635,26 @@ func main() { 使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。 +#### 值映射器(Value Mapper) + +值映射器允许使用一个自定义函数自动展开值的具体内容,例如:运行时获取环境变量: + +```go +type Env struct { + Foo string `ini:"foo"` +} + +func main() { + cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n") + cfg.ValueMapper = os.ExpandEnv + // ... + env := &Env{} + err = cfg.Section("env").MapTo(env) +} +``` + +本例中,`env.Foo` 将会是运行时所获取到环境变量 `MY_VAR` 的值。 + #### 映射/反射的其它说明 任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联: diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go new file mode 100644 index 00000000000..80afe743158 --- /dev/null +++ b/vendor/github.com/go-ini/ini/error.go @@ -0,0 +1,32 @@ +// Copyright 2016 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "fmt" +) + +type ErrDelimiterNotFound struct { + Line string +} + +func IsErrDelimiterNotFound(err error) bool { + _, ok := err.(ErrDelimiterNotFound) + return ok +} + +func (err ErrDelimiterNotFound) Error() string { + return fmt.Sprintf("key-value delimiter not found: %s", err.Line) +} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go index 8b09ad3644f..7f3c4d1ed1f 100644 --- a/vendor/github.com/go-ini/ini/ini.go +++ b/vendor/github.com/go-ini/ini/ini.go @@ -16,40 +16,51 @@ package ini import ( - "bufio" "bytes" "errors" "fmt" "io" + "io/ioutil" "os" "regexp" "runtime" - "strconv" "strings" "sync" - "time" ) const ( + // Name for default section. 
You can use this constant or the string literal. + // In most of cases, an empty string is all you need to access the section. DEFAULT_SECTION = "DEFAULT" + // Maximum allowed depth when recursively substituing variable names. _DEPTH_VALUES = 99 - - _VERSION = "1.7.0" + _VERSION = "1.28.2" ) +// Version returns current package version literal. func Version() string { return _VERSION } var ( + // Delimiter to determine or compose a new line. + // This variable will be changed to "\r\n" automatically on Windows + // at package init time. LineBreak = "\n" // Variable regexp pattern: %(variable)s varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) - // Write spaces around "=" to look better. + // Indicate whether to align "=" sign with spaces to produce pretty output + // or reduce all possible spaces for compact format. PrettyFormat = true + + // Explicitly write DEFAULT section header + DefaultHeader = false + + // Indicate whether to put a line between sections + PrettySection = true ) func init() { @@ -67,11 +78,12 @@ func inSlice(str string, s []string) bool { return false } -// dataSource is a interface that returns file content. +// dataSource is an interface that returns object which can be read and closed. type dataSource interface { ReadCloser() (io.ReadCloser, error) } +// sourceFile represents an object that contains content on the local file system. type sourceFile struct { name string } @@ -92,618 +104,24 @@ func (rc *bytesReadCloser) Close() error { return nil } +// sourceData represents an object that contains content in memory. type sourceData struct { data []byte } func (s *sourceData) ReadCloser() (io.ReadCloser, error) { - return &bytesReadCloser{bytes.NewReader(s.data)}, nil -} - -// ____ __. -// | |/ _|____ ___.__. -// | <_/ __ < | | -// | | \ ___/\___ | -// |____|__ \___ > ____| -// \/ \/\/ - -// Key represents a key under a section. -type Key struct { - s *Section - Comment string - name string - value string - isAutoIncr bool -} - -// Name returns name of key. -func (k *Key) Name() string { - return k.name -} - -// Value returns raw value of key for performance purpose. -func (k *Key) Value() string { - return k.value -} - -// String returns string representation of value. -func (k *Key) String() string { - val := k.value - if strings.Index(val, "%") == -1 { - return val - } - - for i := 0; i < _DEPTH_VALUES; i++ { - vr := varPattern.FindString(val) - if len(vr) == 0 { - break - } - - // Take off leading '%(' and trailing ')s'. - noption := strings.TrimLeft(vr, "%(") - noption = strings.TrimRight(noption, ")s") - - // Search in the same section. - nk, err := k.s.GetKey(noption) - if err != nil { - // Search again in default section. - nk, _ = k.s.f.Section("").GetKey(noption) - } - - // Substitute by new value and take off leading '%(' and trailing ')s'. - val = strings.Replace(val, vr, nk.value, -1) - } - return val -} - -// Validate accepts a validate function which can -// return modifed result as key value. -func (k *Key) Validate(fn func(string) string) string { - return fn(k.String()) -} - -// parseBool returns the boolean value represented by the string. -// -// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On, -// 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off. -// Any other value returns an error. 
-func parseBool(str string) (value bool, err error) { - switch str { - case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "ON", "on", "On": - return true, nil - case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "OFF", "off", "Off": - return false, nil - } - return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) -} - -// Bool returns bool type value. -func (k *Key) Bool() (bool, error) { - return parseBool(k.String()) -} - -// Float64 returns float64 type value. -func (k *Key) Float64() (float64, error) { - return strconv.ParseFloat(k.String(), 64) -} - -// Int returns int type value. -func (k *Key) Int() (int, error) { - return strconv.Atoi(k.String()) -} - -// Int64 returns int64 type value. -func (k *Key) Int64() (int64, error) { - return strconv.ParseInt(k.String(), 10, 64) -} - -// Uint returns uint type valued. -func (k *Key) Uint() (uint, error) { - u, e := strconv.ParseUint(k.String(), 10, 64) - return uint(u), e -} - -// Uint64 returns uint64 type value. -func (k *Key) Uint64() (uint64, error) { - return strconv.ParseUint(k.String(), 10, 64) -} - -// Duration returns time.Duration type value. -func (k *Key) Duration() (time.Duration, error) { - return time.ParseDuration(k.String()) -} - -// TimeFormat parses with given format and returns time.Time type value. -func (k *Key) TimeFormat(format string) (time.Time, error) { - return time.Parse(format, k.String()) -} - -// Time parses with RFC3339 format and returns time.Time type value. -func (k *Key) Time() (time.Time, error) { - return k.TimeFormat(time.RFC3339) -} - -// MustString returns default value if key value is empty. -func (k *Key) MustString(defaultVal string) string { - val := k.String() - if len(val) == 0 { - return defaultVal - } - return val -} - -// MustBool always returns value without error, -// it returns false if error occurs. -func (k *Key) MustBool(defaultVal ...bool) bool { - val, err := k.Bool() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustFloat64 always returns value without error, -// it returns 0.0 if error occurs. -func (k *Key) MustFloat64(defaultVal ...float64) float64 { - val, err := k.Float64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustInt always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt(defaultVal ...int) int { - val, err := k.Int() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustInt64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt64(defaultVal ...int64) int64 { - val, err := k.Int64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustUint always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint(defaultVal ...uint) uint { - val, err := k.Uint() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustUint64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint64(defaultVal ...uint64) uint64 { - val, err := k.Uint64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustDuration always returns value without error, -// it returns zero value if error occurs. 
-func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { - val, err := k.Duration() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustTimeFormat always parses with given format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { - val, err := k.TimeFormat(format) - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustTime always parses with RFC3339 format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTime(defaultVal ...time.Time) time.Time { - return k.MustTimeFormat(time.RFC3339, defaultVal...) -} - -// In always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) In(defaultVal string, candidates []string) string { - val := k.String() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InFloat64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { - val := k.MustFloat64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt(defaultVal int, candidates []int) int { - val := k.MustInt() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { - val := k.MustInt64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint(defaultVal uint, candidates []uint) uint { - val := k.MustUint() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { - val := k.MustUint64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTimeFormat always parses with given format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { - val := k.MustTimeFormat(format) - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTime always parses with RFC3339 format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { - return k.InTimeFormat(time.RFC3339, defaultVal, candidates) -} - -// RangeFloat64 checks if value is in given range inclusively, -// and returns default value if it's not. 
-func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { - val := k.MustFloat64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt(defaultVal, min, max int) int { - val := k.MustInt() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { - val := k.MustInt64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeTimeFormat checks if value with given format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { - val := k.MustTimeFormat(format) - if val.Unix() < min.Unix() || val.Unix() > max.Unix() { - return defaultVal - } - return val -} - -// RangeTime checks if value with RFC3339 format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { - return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) -} - -// Strings returns list of string devide by given delimiter. -func (k *Key) Strings(delim string) []string { - str := k.String() - if len(str) == 0 { - return []string{} - } - - vals := strings.Split(str, delim) - for i := range vals { - vals[i] = strings.TrimSpace(vals[i]) - } - return vals -} - -// Float64s returns list of float64 devide by given delimiter. -func (k *Key) Float64s(delim string) []float64 { - strs := k.Strings(delim) - vals := make([]float64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseFloat(strs[i], 64) - } - return vals -} - -// Ints returns list of int devide by given delimiter. -func (k *Key) Ints(delim string) []int { - strs := k.Strings(delim) - vals := make([]int, len(strs)) - for i := range strs { - vals[i], _ = strconv.Atoi(strs[i]) - } - return vals -} - -// Int64s returns list of int64 devide by given delimiter. -func (k *Key) Int64s(delim string) []int64 { - strs := k.Strings(delim) - vals := make([]int64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseInt(strs[i], 10, 64) - } - return vals -} - -// Uints returns list of uint devide by given delimiter. -func (k *Key) Uints(delim string) []uint { - strs := k.Strings(delim) - vals := make([]uint, len(strs)) - for i := range strs { - u, _ := strconv.ParseUint(strs[i], 10, 64) - vals[i] = uint(u) - } - return vals -} - -// Uint64s returns list of uint64 devide by given delimiter. -func (k *Key) Uint64s(delim string) []uint64 { - strs := k.Strings(delim) - vals := make([]uint64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseUint(strs[i], 10, 64) - } - return vals -} - -// TimesFormat parses with given format and returns list of time.Time devide by given delimiter. -func (k *Key) TimesFormat(format, delim string) []time.Time { - strs := k.Strings(delim) - vals := make([]time.Time, len(strs)) - for i := range strs { - vals[i], _ = time.Parse(format, strs[i]) - } - return vals -} - -// Times parses with RFC3339 format and returns list of time.Time devide by given delimiter. -func (k *Key) Times(delim string) []time.Time { - return k.TimesFormat(time.RFC3339, delim) -} - -// SetValue changes key value. 
-func (k *Key) SetValue(v string) { - k.value = v -} - -// _________ __ .__ -// / _____/ ____ _____/ |_|__| ____ ____ -// \_____ \_/ __ \_/ ___\ __\ |/ _ \ / \ -// / \ ___/\ \___| | | ( <_> ) | \ -// /_______ /\___ >\___ >__| |__|\____/|___| / -// \/ \/ \/ \/ - -// Section represents a config section. -type Section struct { - f *File - Comment string - name string - keys map[string]*Key - keyList []string - keysHash map[string]string -} - -func newSection(f *File, name string) *Section { - return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} -} - -// Name returns name of Section. -func (s *Section) Name() string { - return s.name -} - -// NewKey creates a new key to given section. -func (s *Section) NewKey(name, val string) (*Key, error) { - if len(name) == 0 { - return nil, errors.New("error creating new key: empty key name") - } - - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - if inSlice(name, s.keyList) { - s.keys[name].value = val - return s.keys[name], nil - } - - s.keyList = append(s.keyList, name) - s.keys[name] = &Key{s, "", name, val, false} - s.keysHash[name] = val - return s.keys[name], nil -} - -// GetKey returns key in section by given name. -func (s *Section) GetKey(name string) (*Key, error) { - // FIXME: change to section level lock? - if s.f.BlockMode { - s.f.lock.RLock() - } - key := s.keys[name] - if s.f.BlockMode { - s.f.lock.RUnlock() - } - - if key == nil { - // Check if it is a child-section. - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - return sec.GetKey(name) - } else { - break - } - } - return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) - } - return key, nil -} - -// HasKey returns true if section contains a key with given name. -func (s *Section) Haskey(name string) bool { - key, _ := s.GetKey(name) - return key != nil -} - -// HasKey returns true if section contains given raw value. -func (s *Section) HasValue(value string) bool { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - for _, k := range s.keys { - if value == k.value { - return true - } - } - return false -} - -// Key assumes named Key exists in section and returns a zero-value when not. -func (s *Section) Key(name string) *Key { - key, err := s.GetKey(name) - if err != nil { - // It's OK here because the only possible error is empty key name, - // but if it's empty, this piece of code won't be executed. - key, _ = s.NewKey(name, "") - return key - } - return key -} - -// Keys returns list of keys of section. -func (s *Section) Keys() []*Key { - keys := make([]*Key, len(s.keyList)) - for i := range s.keyList { - keys[i] = s.Key(s.keyList[i]) - } - return keys -} - -// KeyStrings returns list of key names of section. -func (s *Section) KeyStrings() []string { - list := make([]string, len(s.keyList)) - copy(list, s.keyList) - return list + return ioutil.NopCloser(bytes.NewReader(s.data)), nil } -// KeysHash returns keys hash consisting of names and values. -func (s *Section) KeysHash() map[string]string { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - hash := map[string]string{} - for key, value := range s.keysHash { - hash[key] = value - } - return hash +// sourceReadCloser represents an input stream with Close method. 
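// A hedged usage sketch (illustrative, not from the original sources): with this
// wrapper, Load can accept an io.ReadCloser alongside file names and raw []byte:
//
//	r := ioutil.NopCloser(bytes.NewReader([]byte("name = ini")))
//	cfg, err := ini.Load(r, "override.ini") // "override.ini" is a hypothetical file name
//	if err == nil {
//		_ = cfg.Section("").Key("name").String() // "ini", unless the file overrides it
//	}
//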
+type sourceReadCloser struct { + reader io.ReadCloser } -// DeleteKey deletes a key from section. -func (s *Section) DeleteKey(name string) { - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - for i, k := range s.keyList { - if k == name { - s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) - delete(s.keys, name) - return - } - } +func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { + return s.reader, nil } -// ___________.__.__ -// \_ _____/|__| | ____ -// | __) | | | _/ __ \ -// | \ | | |_\ ___/ -// \___ / |__|____/\___ > -// \/ \/ - // File represents a combination of a or more INI file(s) in memory. type File struct { // Should make things safe, but sometimes doesn't matter. @@ -719,16 +137,20 @@ type File struct { // To keep data in order. sectionList []string + options LoadOptions + NameMapper + ValueMapper } // newFile initializes File object with given data sources. -func newFile(dataSources []dataSource) *File { +func newFile(dataSources []dataSource, opts LoadOptions) *File { return &File{ BlockMode: true, dataSources: dataSources, sections: make(map[string]*Section), sectionList: make([]string, 0, 10), + options: opts, } } @@ -738,14 +160,33 @@ func parseDataSource(source interface{}) (dataSource, error) { return sourceFile{s}, nil case []byte: return &sourceData{s}, nil + case io.ReadCloser: + return &sourceReadCloser{s}, nil default: return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) } } -// Load loads and parses from INI data sources. -// Arguments can be mixed of file name with string type, or raw data in []byte. -func Load(source interface{}, others ...interface{}) (_ *File, err error) { +type LoadOptions struct { + // Loose indicates whether the parser should ignore nonexistent files or return error. + Loose bool + // Insensitive indicates whether the parser forces all section and key names to lowercase. + Insensitive bool + // IgnoreContinuation indicates whether to ignore continuation lines while parsing. + IgnoreContinuation bool + // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. + IgnoreInlineComment bool + // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. + // This type of keys are mostly used in my.cnf. + AllowBooleanKeys bool + // AllowShadows indicates whether to keep track of keys with same name under same section. + AllowShadows bool + // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise + // conform to key/value pairs. Specify the names of those blocks here. + UnparseableSections []string +} + +func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { sources := make([]dataSource, len(others)+1) sources[0], err = parseDataSource(source) if err != nil { @@ -757,8 +198,36 @@ func Load(source interface{}, others ...interface{}) (_ *File, err error) { return nil, err } } - f := newFile(sources) - return f, f.Reload() + f := newFile(sources, opts) + if err = f.Reload(); err != nil { + return nil, err + } + return f, nil +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +// It will return error if list contains nonexistent files. +func Load(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{}, source, others...) 
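// Illustrative note (a hedged sketch, not from the original sources): LoadSources
// exposes the same parsing with explicit options, e.g. tolerating missing files
// and keeping inline '#'/';' characters as part of the value:
//
//	cfg, err := ini.LoadSources(ini.LoadOptions{
//		Loose:               true,
//		IgnoreInlineComment: true,
//	}, "maybe-missing.ini", []byte("password = abc#123")) // file name is hypothetical
//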
+} + +// LooseLoad has exactly same functionality as Load function +// except it ignores nonexistent files instead of returning error. +func LooseLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Loose: true}, source, others...) +} + +// InsensitiveLoad has exactly same functionality as Load function +// except it forces all section and key names to be lowercased. +func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Insensitive: true}, source, others...) +} + +// InsensitiveLoad has exactly same functionality as Load function +// except it allows have shadow keys. +func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{AllowShadows: true}, source, others...) } // Empty returns an empty file object. @@ -772,6 +241,8 @@ func Empty() *File { func (f *File) NewSection(name string) (*Section, error) { if len(name) == 0 { return nil, errors.New("error creating new section: empty section name") + } else if f.options.Insensitive && name != DEFAULT_SECTION { + name = strings.ToLower(name) } if f.BlockMode { @@ -788,6 +259,18 @@ func (f *File) NewSection(name string) (*Section, error) { return f.sections[name], nil } +// NewRawSection creates a new section with an unparseable body. +func (f *File) NewRawSection(name, body string) (*Section, error) { + section, err := f.NewSection(name) + if err != nil { + return nil, err + } + + section.isRawSection = true + section.rawBody = body + return section, nil +} + // NewSections creates a list of sections. func (f *File) NewSections(names ...string) (err error) { for _, name := range names { @@ -802,6 +285,8 @@ func (f *File) NewSections(names ...string) (err error) { func (f *File) GetSection(name string) (*Section, error) { if len(name) == 0 { name = DEFAULT_SECTION + } else if f.options.Insensitive { + name = strings.ToLower(name) } if f.BlockMode { @@ -811,7 +296,7 @@ func (f *File) GetSection(name string) (*Section, error) { sec := f.sections[name] if sec == nil { - return nil, fmt.Errorf("error when getting section: section '%s' not exists", name) + return nil, fmt.Errorf("section '%s' does not exist", name) } return sec, nil } @@ -837,6 +322,11 @@ func (f *File) Sections() []*Section { return sections } +// ChildSections returns a list of child sections of given section name. +func (f *File) ChildSections(name string) []*Section { + return f.Section(name).ChildSections() +} + // SectionStrings returns list of section names. 
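// As a hedged illustration (not from the original sources): for a file that
// defines [node], [node.biz1] and [node.biz2], one would expect
//
//	cfg.SectionStrings()      // ["DEFAULT", "node", "node.biz1", "node.biz2"]
//	cfg.ChildSections("node") // the two "node.*" child sections
//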
func (f *File) SectionStrings() []string { list := make([]string, len(f.sectionList)) @@ -864,240 +354,6 @@ func (f *File) DeleteSection(name string) { } } -func cutComment(str string) string { - i := strings.Index(str, "#") - if i == -1 { - return str - } - return str[:i] -} - -func checkMultipleLines(buf *bufio.Reader, line, val, valQuote string) (string, error) { - isEnd := false - for { - next, err := buf.ReadString('\n') - if err != nil { - if err != io.EOF { - return "", err - } - isEnd = true - } - pos := strings.LastIndex(next, valQuote) - if pos > -1 { - val += next[:pos] - break - } - val += next - if isEnd { - return "", fmt.Errorf("error parsing line: missing closing key quote from '%s' to '%s'", line, next) - } - } - return val, nil -} - -func checkContinuationLines(buf *bufio.Reader, val string) (string, bool, error) { - isEnd := false - for { - valLen := len(val) - if valLen == 0 || val[valLen-1] != '\\' { - break - } - val = val[:valLen-1] - - next, err := buf.ReadString('\n') - if err != nil { - if err != io.EOF { - return "", isEnd, err - } - isEnd = true - } - - next = strings.TrimSpace(next) - if len(next) == 0 { - break - } - val += next - } - return val, isEnd, nil -} - -// parse parses data through an io.Reader. -func (f *File) parse(reader io.Reader) error { - buf := bufio.NewReader(reader) - - // Handle BOM-UTF8. - // http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding - mask, err := buf.Peek(3) - if err == nil && len(mask) >= 3 && mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { - buf.Read(mask) - } - - count := 1 - comments := "" - isEnd := false - - section, err := f.NewSection(DEFAULT_SECTION) - if err != nil { - return err - } - - for { - line, err := buf.ReadString('\n') - line = strings.TrimSpace(line) - length := len(line) - - // Check error and ignore io.EOF just for a moment. - if err != nil { - if err != io.EOF { - return fmt.Errorf("error reading next line: %v", err) - } - // The last line of file could be an empty line. - if length == 0 { - break - } - isEnd = true - } - - // Skip empty lines. - if length == 0 { - continue - } - - switch { - case line[0] == '#' || line[0] == ';': // Comments. - if len(comments) == 0 { - comments = line - } else { - comments += LineBreak + line - } - continue - case line[0] == '[' && line[length-1] == ']': // New sction. - section, err = f.NewSection(strings.TrimSpace(line[1 : length-1])) - if err != nil { - return err - } - - if len(comments) > 0 { - section.Comment = comments - comments = "" - } - // Reset counter. - count = 1 - continue - } - - // Other possibilities. - var ( - i int - keyQuote string - kname string - valQuote string - val string - ) - - // Key name surrounded by quotes. - if line[0] == '"' { - if length > 6 && line[0:3] == `"""` { - keyQuote = `"""` - } else { - keyQuote = `"` - } - } else if line[0] == '`' { - keyQuote = "`" - } - if len(keyQuote) > 0 { - qLen := len(keyQuote) - pos := strings.Index(line[qLen:], keyQuote) - if pos == -1 { - return fmt.Errorf("error parsing line: missing closing key quote: %s", line) - } - pos = pos + qLen - i = strings.IndexAny(line[pos:], "=:") - if i < 0 { - return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) - } else if i == pos { - return fmt.Errorf("error parsing line: key is empty: %s", line) - } - i = i + pos - kname = line[qLen:pos] // Just keep spaces inside quotes. 
- } else { - i = strings.IndexAny(line, "=:") - if i < 0 { - return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) - } else if i == 0 { - return fmt.Errorf("error parsing line: key is empty: %s", line) - } - kname = strings.TrimSpace(line[0:i]) - } - - isAutoIncr := false - // Auto increment. - if kname == "-" { - isAutoIncr = true - kname = "#" + fmt.Sprint(count) - count++ - } - - lineRight := strings.TrimSpace(line[i+1:]) - lineRightLength := len(lineRight) - firstChar := "" - if lineRightLength >= 2 { - firstChar = lineRight[0:1] - } - if firstChar == "`" { - valQuote = "`" - } else if firstChar == `"` { - if lineRightLength >= 3 && lineRight[0:3] == `"""` { - valQuote = `"""` - } else { - valQuote = `"` - } - } else if firstChar == `'` { - valQuote = `'` - } - - if len(valQuote) > 0 { - qLen := len(valQuote) - pos := strings.LastIndex(lineRight[qLen:], valQuote) - // For multiple-line value check. - if pos == -1 { - if valQuote == `"` || valQuote == `'` { - return fmt.Errorf("error parsing line: single quote does not allow multiple-line value: %s", line) - } - - val = lineRight[qLen:] + "\n" - val, err = checkMultipleLines(buf, line, val, valQuote) - if err != nil { - return err - } - } else { - val = lineRight[qLen : pos+qLen] - } - } else { - val = strings.TrimSpace(cutComment(lineRight)) - val, isEnd, err = checkContinuationLines(buf, val) - if err != nil { - return err - } - } - - k, err := section.NewKey(kname, val) - if err != nil { - return err - } - k.isAutoIncr = isAutoIncr - if len(comments) > 0 { - k.Comment = comments - comments = "" - } - - if isEnd { - break - } - } - return nil -} - func (f *File) reload(s dataSource) error { r, err := s.ReadCloser() if err != nil { @@ -1112,6 +368,11 @@ func (f *File) reload(s dataSource) error { func (f *File) Reload() (err error) { for _, s := range f.dataSources { if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + f.parse(bytes.NewBuffer(nil)) + continue + } return err } } @@ -1135,8 +396,7 @@ func (f *File) Append(source interface{}, others ...interface{}) error { return f.Reload() } -// WriteToIndent writes file content into io.Writer with given value indention. -func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { +func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { equalSign := "=" if PrettyFormat { equalSign = " = " @@ -1150,22 +410,51 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { if sec.Comment[0] != '#' && sec.Comment[0] != ';' { sec.Comment = "; " + sec.Comment } - if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { - return 0, err + if _, err := buf.WriteString(sec.Comment + LineBreak); err != nil { + return nil, err } } - if i > 0 { - if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { - return 0, err + if i > 0 || DefaultHeader { + if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return nil, err } } else { - // Write nothing if default section is empty. + // Write nothing if default section is empty if len(sec.keyList) == 0 { continue } } + if sec.isRawSection { + if _, err := buf.WriteString(sec.rawBody); err != nil { + return nil, err + } + continue + } + + // Count and generate alignment length and buffer spaces using the + // longest key. 
Keys may be modifed if they contain certain characters so + // we need to take that into account in our calculation. + alignLength := 0 + if PrettyFormat { + for _, kname := range sec.keyList { + keyLength := len(kname) + // First case will surround key by ` and second by """ + if strings.ContainsAny(kname, "\"=:") { + keyLength += 2 + } else if strings.Contains(kname, "`") { + keyLength += 6 + } + + if keyLength > alignLength { + alignLength = keyLength + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + KEY_LIST: for _, kname := range sec.keyList { key := sec.Key(kname) if len(key.Comment) > 0 { @@ -1175,8 +464,8 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { if key.Comment[0] != '#' && key.Comment[0] != ';' { key.Comment = "; " + key.Comment } - if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { - return 0, err + if _, err := buf.WriteString(key.Comment + LineBreak); err != nil { + return nil, err } } @@ -1185,31 +474,62 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { } switch { - case key.isAutoIncr: + case key.isAutoIncrement: kname = "-" - case strings.Contains(kname, "`") || strings.Contains(kname, `"`): - kname = `"""` + kname + `"""` - case strings.Contains(kname, `=`) || strings.Contains(kname, `:`): + case strings.ContainsAny(kname, "\"=:"): kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` } - val := key.value - // In case key value contains "\n", "`" or "\"". - if strings.Contains(val, "\n") || strings.Contains(val, "`") || strings.Contains(val, `"`) || - strings.Contains(val, "#") { - val = `"""` + val + `"""` - } - if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil { - return 0, err + for _, val := range key.ValueWithShadows() { + if _, err := buf.WriteString(kname); err != nil { + return nil, err + } + + if key.isBooleanType { + if kname != sec.keyList[len(sec.keyList)-1] { + buf.WriteString(LineBreak) + } + continue KEY_LIST + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } + if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { + return nil, err + } } } - // Put a line between sections. - if _, err = buf.WriteString(LineBreak); err != nil { - return 0, err + if PrettySection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } } } + return buf, nil +} + +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. +func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { + buf, err := f.writeToBuffer(indent) + if err != nil { + return 0, err + } return buf.WriteTo(w) } @@ -1222,23 +542,12 @@ func (f *File) WriteTo(w io.Writer) (int64, error) { func (f *File) SaveToIndent(filename, indent string) error { // Note: Because we are truncating with os.Create, // so it's safer to save to a temporary file location and rename afte done. - tmpPath := filename + "." 
+ strconv.Itoa(time.Now().Nanosecond()) + ".tmp" - defer os.Remove(tmpPath) - - fw, err := os.Create(tmpPath) + buf, err := f.writeToBuffer(indent) if err != nil { return err } - if _, err = f.WriteToIndent(fw, indent); err != nil { - fw.Close() - return err - } - fw.Close() - - // Remove old file and rename the new one. - os.Remove(filename) - return os.Rename(tmpPath, filename) + return ioutil.WriteFile(filename, buf.Bytes(), 0666) } // SaveTo writes content to file system. diff --git a/vendor/github.com/go-ini/ini/ini_test.go b/vendor/github.com/go-ini/ini/ini_test.go index 50e414afbe8..b3dd217c643 100644 --- a/vendor/github.com/go-ini/ini/ini_test.go +++ b/vendor/github.com/go-ini/ini/ini_test.go @@ -15,7 +15,8 @@ package ini import ( - "fmt" + "bytes" + "io/ioutil" "strings" "testing" "time" @@ -31,19 +32,19 @@ func Test_Version(t *testing.T) { const _CONF_DATA = ` ; Package name -NAME = ini +NAME = ini ; Package version -VERSION = v1 +VERSION = v1 ; Package import path IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s # Information about package author # Bio can be written in multiple lines. [author] -NAME = Unknwon # Succeeding comment +NAME = Unknwon ; Succeeding comment E-MAIL = fake@localhost GITHUB = https://github.com/%(NAME)s -BIO = """Gopher. +BIO = """Gopher. Coding addict. Good man. """ # Succeeding comment @@ -60,33 +61,42 @@ UNUSED_KEY = should be deleted -: Support load multiple files to overwrite key values [types] -STRING = str -BOOL = true +STRING = str +BOOL = true BOOL_FALSE = false -FLOAT64 = 1.25 -INT = 10 -TIME = 2015-01-01T20:17:05Z -DURATION = 2h45m -UINT = 3 +FLOAT64 = 1.25 +INT = 10 +TIME = 2015-01-01T20:17:05Z +DURATION = 2h45m +UINT = 3 [array] -STRINGS = en, zh, de +STRINGS = en, zh, de FLOAT64S = 1.1, 2.2, 3.3 -INTS = 1, 2, 3 -UINTS = 1, 2, 3 -TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z +INTS = 1, 2, 3 +UINTS = 1, 2, 3 +TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z [note] empty_lines = next line is empty\ +; Comment before the section +[comments] ; This is a comment for the section too +; Comment before key +key = "value" +key2 = "value2" ; This is a comment for key2 +key3 = "one", "two", "three" + [advance] value with quotes = "some value" value quote2 again = 'some value' -true = """"2+3=5"""" +includes comment sign = ` + "`" + "my#password" + "`" + ` +includes comment sign2 = ` + "`" + "my;password" + "`" + ` +true = 2+3=5 "1+1=2" = true """6+1=7""" = true """` + "`" + `5+5` + "`" + `""" = 10 -""""6+6"""" = 12 +` + "`" + `"6+6"` + "`" + ` = 12 ` + "`" + `7-2=4` + "`" + ` = false ADDRESS = ` + "`" + `404 road, NotFound, State, 50000` + "`" + ` @@ -107,9 +117,21 @@ func Test_Load(t *testing.T) { }) Convey("Load with multiple data sources", func() { - cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini", ioutil.NopCloser(bytes.NewReader([]byte(_CONF_DATA)))) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + f, err := Load([]byte(_CONF_DATA), "testdata/404.ini") + So(err, ShouldNotBeNil) + So(f, ShouldBeNil) + }) + + Convey("Load with io.ReadCloser", func() { + cfg, err := Load(ioutil.NopCloser(bytes.NewReader([]byte(_CONF_DATA)))) So(err, ShouldBeNil) So(cfg, ShouldNotBeNil) + + So(cfg.Section("").Key("NAME").String(), ShouldEqual, "ini") }) }) @@ -119,8 +141,9 @@ func Test_Load(t *testing.T) { _, err := Load(_CONF_DATA) So(err, ShouldNotBeNil) - _, err = Load("testdata/404.ini") + f, err := Load("testdata/404.ini") So(err, ShouldNotBeNil) + 
So(f, ShouldBeNil) _, err = Load(1) So(err, ShouldNotBeNil) @@ -129,9 +152,12 @@ func Test_Load(t *testing.T) { So(err, ShouldNotBeNil) }) - Convey("Load with empty section name", func() { + Convey("Load with bad section name", func() { _, err := Load([]byte("[]")) So(err, ShouldNotBeNil) + + _, err = Load([]byte("[")) + So(err, ShouldNotBeNil) }) Convey("Load with bad keys", func() { @@ -154,310 +180,118 @@ func Test_Load(t *testing.T) { Convey("Load with bad values", func() { _, err := Load([]byte(`name="""Unknwon`)) So(err, ShouldNotBeNil) - - _, err = Load([]byte(`key = "value`)) - So(err, ShouldNotBeNil) }) }) -} -func Test_Values(t *testing.T) { - Convey("Test getting and setting values", t, func() { - cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + Convey("Get section and key insensitively", t, func() { + cfg, err := InsensitiveLoad([]byte(_CONF_DATA), "testdata/conf.ini") So(err, ShouldBeNil) So(cfg, ShouldNotBeNil) - Convey("Get values in default section", func() { - sec := cfg.Section("") - So(sec, ShouldNotBeNil) - So(sec.Key("NAME").Value(), ShouldEqual, "ini") - So(sec.Key("NAME").String(), ShouldEqual, "ini") - So(sec.Key("NAME").Validate(func(in string) string { - return in - }), ShouldEqual, "ini") - So(sec.Key("NAME").Comment, ShouldEqual, "; Package name") - So(sec.Key("IMPORT_PATH").String(), ShouldEqual, "gopkg.in/ini.v1") - }) - - Convey("Get values in non-default section", func() { - sec := cfg.Section("author") - So(sec, ShouldNotBeNil) - So(sec.Key("NAME").String(), ShouldEqual, "Unknwon") - So(sec.Key("GITHUB").String(), ShouldEqual, "https://github.com/Unknwon") - - sec = cfg.Section("package") - So(sec, ShouldNotBeNil) - So(sec.Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") - }) - - Convey("Get auto-increment key names", func() { - keys := cfg.Section("features").Keys() - for i, k := range keys { - So(k.Name(), ShouldEqual, fmt.Sprintf("#%d", i+1)) - } - }) - - Convey("Get overwrite value", func() { - So(cfg.Section("author").Key("E-MAIL").String(), ShouldEqual, "u@gogs.io") - }) - - Convey("Get sections", func() { - sections := cfg.Sections() - for i, name := range []string{DEFAULT_SECTION, "author", "package", "package.sub", "features", "types", "array", "note", "advance"} { - So(sections[i].Name(), ShouldEqual, name) - } - }) - - Convey("Get parent section value", func() { - So(cfg.Section("package.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") - }) - - Convey("Get multiple line value", func() { - So(cfg.Section("author").Key("BIO").String(), ShouldEqual, "Gopher.\nCoding addict.\nGood man.\n") - }) - - Convey("Get values with type", func() { - sec := cfg.Section("types") - v1, err := sec.Key("BOOL").Bool() - So(err, ShouldBeNil) - So(v1, ShouldBeTrue) - - v1, err = sec.Key("BOOL_FALSE").Bool() - So(err, ShouldBeNil) - So(v1, ShouldBeFalse) - - v2, err := sec.Key("FLOAT64").Float64() - So(err, ShouldBeNil) - So(v2, ShouldEqual, 1.25) + sec, err := cfg.GetSection("Author") + So(err, ShouldBeNil) + So(sec, ShouldNotBeNil) - v3, err := sec.Key("INT").Int() - So(err, ShouldBeNil) - So(v3, ShouldEqual, 10) + key, err := sec.GetKey("E-mail") + So(err, ShouldBeNil) + So(key, ShouldNotBeNil) + }) - v4, err := sec.Key("INT").Int64() - So(err, ShouldBeNil) - So(v4, ShouldEqual, 10) + Convey("Load with ignoring continuation lines", t, func() { + cfg, err := LoadSources(LoadOptions{IgnoreContinuation: true}, []byte(`key1=a\b\ +key2=c\d\`)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) - v5, err := 
sec.Key("UINT").Uint() - So(err, ShouldBeNil) - So(v5, ShouldEqual, 3) + So(cfg.Section("").Key("key1").String(), ShouldEqual, `a\b\`) + So(cfg.Section("").Key("key2").String(), ShouldEqual, `c\d\`) + }) - v6, err := sec.Key("UINT").Uint64() - So(err, ShouldBeNil) - So(v6, ShouldEqual, 3) + Convey("Load with ignoring inline comments", t, func() { + cfg, err := LoadSources(LoadOptions{IgnoreInlineComment: true}, []byte(`key1=value ;comment +key2=value #comment2`)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) - t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") - So(err, ShouldBeNil) - v7, err := sec.Key("TIME").Time() - So(err, ShouldBeNil) - So(v7.String(), ShouldEqual, t.String()) - - Convey("Must get values with type", func() { - So(sec.Key("STRING").MustString("404"), ShouldEqual, "str") - So(sec.Key("BOOL").MustBool(), ShouldBeTrue) - So(sec.Key("FLOAT64").MustFloat64(), ShouldEqual, 1.25) - So(sec.Key("INT").MustInt(), ShouldEqual, 10) - So(sec.Key("INT").MustInt64(), ShouldEqual, 10) - So(sec.Key("UINT").MustUint(), ShouldEqual, 3) - So(sec.Key("UINT").MustUint64(), ShouldEqual, 3) - So(sec.Key("TIME").MustTime().String(), ShouldEqual, t.String()) - - dur, err := time.ParseDuration("2h45m") - So(err, ShouldBeNil) - So(sec.Key("DURATION").MustDuration().Seconds(), ShouldEqual, dur.Seconds()) - - Convey("Must get values with default value", func() { - So(sec.Key("STRING_404").MustString("404"), ShouldEqual, "404") - So(sec.Key("BOOL_404").MustBool(true), ShouldBeTrue) - So(sec.Key("FLOAT64_404").MustFloat64(2.5), ShouldEqual, 2.5) - So(sec.Key("INT_404").MustInt(15), ShouldEqual, 15) - So(sec.Key("INT_404").MustInt64(15), ShouldEqual, 15) - So(sec.Key("UINT_404").MustUint(6), ShouldEqual, 6) - So(sec.Key("UINT_404").MustUint64(6), ShouldEqual, 6) - - t, err := time.Parse(time.RFC3339, "2014-01-01T20:17:05Z") - So(err, ShouldBeNil) - So(sec.Key("TIME_404").MustTime(t).String(), ShouldEqual, t.String()) - - So(sec.Key("DURATION_404").MustDuration(dur).Seconds(), ShouldEqual, dur.Seconds()) - }) - }) - }) + So(cfg.Section("").Key("key1").String(), ShouldEqual, `value ;comment`) + So(cfg.Section("").Key("key2").String(), ShouldEqual, `value #comment2`) - Convey("Get value with candidates", func() { - sec := cfg.Section("types") - So(sec.Key("STRING").In("", []string{"str", "arr", "types"}), ShouldEqual, "str") - So(sec.Key("FLOAT64").InFloat64(0, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) - So(sec.Key("INT").InInt(0, []int{10, 20, 30}), ShouldEqual, 10) - So(sec.Key("INT").InInt64(0, []int64{10, 20, 30}), ShouldEqual, 10) - So(sec.Key("UINT").InUint(0, []uint{3, 6, 9}), ShouldEqual, 3) - So(sec.Key("UINT").InUint64(0, []uint64{3, 6, 9}), ShouldEqual, 3) + var buf bytes.Buffer + cfg.WriteTo(&buf) + So(buf.String(), ShouldEqual, `key1 = value ;comment +key2 = value #comment2 - zt, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") - So(err, ShouldBeNil) - t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") - So(err, ShouldBeNil) - So(sec.Key("TIME").InTime(zt, []time.Time{t, time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) - - Convey("Get value with candidates and default value", func() { - So(sec.Key("STRING_404").In("str", []string{"str", "arr", "types"}), ShouldEqual, "str") - So(sec.Key("FLOAT64_404").InFloat64(1.25, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) - So(sec.Key("INT_404").InInt(10, []int{10, 20, 30}), ShouldEqual, 10) - So(sec.Key("INT64_404").InInt64(10, []int64{10, 20, 30}), ShouldEqual, 10) - 
So(sec.Key("UINT_404").InUint(3, []uint{3, 6, 9}), ShouldEqual, 3) - So(sec.Key("UINT_404").InUint64(3, []uint64{3, 6, 9}), ShouldEqual, 3) - So(sec.Key("TIME_404").InTime(t, []time.Time{time.Now(), time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) - }) - }) +`) + }) - Convey("Get values in range", func() { - sec := cfg.Section("types") - So(sec.Key("FLOAT64").RangeFloat64(0, 1, 2), ShouldEqual, 1.25) - So(sec.Key("INT").RangeInt(0, 10, 20), ShouldEqual, 10) - So(sec.Key("INT").RangeInt64(0, 10, 20), ShouldEqual, 10) + Convey("Load with boolean type keys", t, func() { + cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, []byte(`key1=hello +key2 +#key3 +key4 +key5`)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) - minT, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") - So(err, ShouldBeNil) - midT, err := time.Parse(time.RFC3339, "2013-01-01T01:00:00Z") - So(err, ShouldBeNil) - maxT, err := time.Parse(time.RFC3339, "9999-01-01T01:00:00Z") - So(err, ShouldBeNil) - t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") - So(err, ShouldBeNil) - So(sec.Key("TIME").RangeTime(t, minT, maxT).String(), ShouldEqual, t.String()) - - Convey("Get value in range with default value", func() { - So(sec.Key("FLOAT64").RangeFloat64(5, 0, 1), ShouldEqual, 5) - So(sec.Key("INT").RangeInt(7, 0, 5), ShouldEqual, 7) - So(sec.Key("INT").RangeInt64(7, 0, 5), ShouldEqual, 7) - So(sec.Key("TIME").RangeTime(t, minT, midT).String(), ShouldEqual, t.String()) - }) - }) + So(strings.Join(cfg.Section("").KeyStrings(), ","), ShouldEqual, "key1,key2,key4,key5") + So(cfg.Section("").Key("key2").MustBool(false), ShouldBeTrue) + + var buf bytes.Buffer + cfg.WriteTo(&buf) + // there is always a trailing \n at the end of the section + So(buf.String(), ShouldEqual, `key1 = hello +key2 +#key3 +key4 +key5 +`) + }) +} - Convey("Get values into slice", func() { - sec := cfg.Section("array") - So(strings.Join(sec.Key("STRINGS").Strings(","), ","), ShouldEqual, "en,zh,de") - So(len(sec.Key("STRINGS_404").Strings(",")), ShouldEqual, 0) +func Test_File_ChildSections(t *testing.T) { + Convey("Find child sections by parent name", t, func() { + cfg, err := Load([]byte(` +[node] - vals1 := sec.Key("FLOAT64S").Float64s(",") - for i, v := range []float64{1.1, 2.2, 3.3} { - So(vals1[i], ShouldEqual, v) - } +[node.biz1] - vals2 := sec.Key("INTS").Ints(",") - for i, v := range []int{1, 2, 3} { - So(vals2[i], ShouldEqual, v) - } +[node.biz2] - vals3 := sec.Key("INTS").Int64s(",") - for i, v := range []int64{1, 2, 3} { - So(vals3[i], ShouldEqual, v) - } +[node.biz3] - vals4 := sec.Key("UINTS").Uints(",") - for i, v := range []uint{1, 2, 3} { - So(vals4[i], ShouldEqual, v) - } +[node.bizN] +`)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) - vals5 := sec.Key("UINTS").Uint64s(",") - for i, v := range []uint64{1, 2, 3} { - So(vals5[i], ShouldEqual, v) - } + children := cfg.ChildSections("node") + names := make([]string, len(children)) + for i := range children { + names[i] = children[i].name + } + So(strings.Join(names, ","), ShouldEqual, "node.biz1,node.biz2,node.biz3,node.bizN") + }) +} - t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") +func Test_LooseLoad(t *testing.T) { + Convey("Loose load from data sources", t, func() { + Convey("Loose load mixed with nonexistent file", func() { + cfg, err := LooseLoad("testdata/404.ini") So(err, ShouldBeNil) - vals6 := sec.Key("TIMES").Times(",") - for i, v := range []time.Time{t, t, t} { - So(vals6[i].String(), ShouldEqual, v.String()) + 
So(cfg, ShouldNotBeNil) + var fake struct { + Name string `ini:"name"` } - }) - - Convey("Get key hash", func() { - cfg.Section("").KeysHash() - }) - - Convey("Set key value", func() { - k := cfg.Section("author").Key("NAME") - k.SetValue("无闻") - So(k.String(), ShouldEqual, "无闻") - }) - - Convey("Get key strings", func() { - So(strings.Join(cfg.Section("types").KeyStrings(), ","), ShouldEqual, "STRING,BOOL,BOOL_FALSE,FLOAT64,INT,TIME,DURATION,UINT") - }) - - Convey("Delete a key", func() { - cfg.Section("package.sub").DeleteKey("UNUSED_KEY") - _, err := cfg.Section("package.sub").GetKey("UNUSED_KEY") - So(err, ShouldNotBeNil) - }) - - Convey("Has Key", func() { - sec := cfg.Section("package.sub") - haskey1 := sec.Haskey("UNUSED_KEY") - haskey2 := sec.Haskey("CLONE_URL") - haskey3 := sec.Haskey("CLONE_URL_NO") - So(haskey1, ShouldBeTrue) - So(haskey2, ShouldBeTrue) - So(haskey3, ShouldBeFalse) - }) - - Convey("Has Value", func() { - sec := cfg.Section("author") - hasvalue1 := sec.HasValue("Unknwon") - hasvalue2 := sec.HasValue("doc") - So(hasvalue1, ShouldBeTrue) - So(hasvalue2, ShouldBeFalse) - }) - - Convey("Get section strings", func() { - So(strings.Join(cfg.SectionStrings(), ","), ShouldEqual, "DEFAULT,author,package,package.sub,features,types,array,note,advance") - }) - - Convey("Delete a section", func() { - cfg.DeleteSection("") - So(cfg.SectionStrings()[0], ShouldNotEqual, DEFAULT_SECTION) - }) + So(cfg.MapTo(&fake), ShouldBeNil) - Convey("Create new sections", func() { - cfg.NewSections("test", "test2") - _, err := cfg.GetSection("test") - So(err, ShouldBeNil) - _, err = cfg.GetSection("test2") + cfg, err = LooseLoad([]byte("name=Unknwon"), "testdata/404.ini") So(err, ShouldBeNil) + So(cfg.Section("").Key("name").String(), ShouldEqual, "Unknwon") + So(cfg.MapTo(&fake), ShouldBeNil) + So(fake.Name, ShouldEqual, "Unknwon") }) }) - Convey("Test getting and setting bad values", t, func() { - cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") - So(err, ShouldBeNil) - So(cfg, ShouldNotBeNil) - - Convey("Create new key with empty name", func() { - k, err := cfg.Section("").NewKey("", "") - So(err, ShouldNotBeNil) - So(k, ShouldBeNil) - }) - - Convey("Create new section with empty name", func() { - s, err := cfg.NewSection("") - So(err, ShouldNotBeNil) - So(s, ShouldBeNil) - }) - - Convey("Create new sections with empty name", func() { - So(cfg.NewSections(""), ShouldNotBeNil) - }) - - Convey("Get section that not exists", func() { - s, err := cfg.GetSection("404") - So(err, ShouldNotBeNil) - So(s, ShouldBeNil) - - s = cfg.Section("404") - So(s, ShouldNotBeNil) - }) - }) } func Test_File_Append(t *testing.T) { @@ -475,7 +309,15 @@ func Test_File_Append(t *testing.T) { }) } -func Test_File_SaveTo(t *testing.T) { +func Test_File_WriteTo(t *testing.T) { + Convey("Write to somewhere", t, func() { + var buf bytes.Buffer + cfg := Empty() + cfg.WriteTo(&buf) + }) +} + +func Test_File_SaveTo_WriteTo(t *testing.T) { Convey("Save file", t, func() { cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") So(err, ShouldBeNil) @@ -485,46 +327,165 @@ func Test_File_SaveTo(t *testing.T) { cfg.Section("author").Comment = `Information about package author # Bio can be written in multiple lines.` cfg.Section("advanced").Key("val w/ pound").SetValue("my#password") + cfg.Section("advanced").Key("longest key has a colon : yes/no").SetValue("yes") So(cfg.SaveTo("testdata/conf_out.ini"), ShouldBeNil) cfg.Section("author").Key("NAME").Comment = "This is author name" + 
So(cfg.SaveToIndent("testdata/conf_out.ini", "\t"), ShouldBeNil) + + var buf bytes.Buffer + _, err = cfg.WriteToIndent(&buf, "\t") + So(err, ShouldBeNil) + So(buf.String(), ShouldEqual, `; Package name +NAME = ini +; Package version +VERSION = v1 +; Package import path +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +; Information about package author +# Bio can be written in multiple lines. +[author] + ; This is author name + NAME = Unknwon + E-MAIL = u@gogs.io + GITHUB = https://github.com/%(NAME)s + # Succeeding comment + BIO = """Gopher. +Coding addict. +Good man. +""" + +[package] + CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] + UNUSED_KEY = should be deleted + +[features] + - = Support read/write comments of keys and sections + - = Support auto-increment of key names + - = Support load multiple files to overwrite key values + +[types] + STRING = str + BOOL = true + BOOL_FALSE = false + FLOAT64 = 1.25 + INT = 10 + TIME = 2015-01-01T20:17:05Z + DURATION = 2h45m + UINT = 3 + +[array] + STRINGS = en, zh, de + FLOAT64S = 1.1, 2.2, 3.3 + INTS = 1, 2, 3 + UINTS = 1, 2, 3 + TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z + +[note] + empty_lines = next line is empty + +; Comment before the section +; This is a comment for the section too +[comments] + ; Comment before key + key = value + ; This is a comment for key2 + key2 = value2 + key3 = "one", "two", "three" + +[advance] + value with quotes = some value + value quote2 again = some value + includes comment sign = `+"`"+"my#password"+"`"+` + includes comment sign2 = `+"`"+"my;password"+"`"+` + true = 2+3=5 + `+"`"+`1+1=2`+"`"+` = true + `+"`"+`6+1=7`+"`"+` = true + """`+"`"+`5+5`+"`"+`""" = 10 + `+"`"+`"6+6"`+"`"+` = 12 + `+"`"+`7-2=4`+"`"+` = false + ADDRESS = """404 road, +NotFound, State, 50000""" + two_lines = how about continuation lines? + lots_of_lines = 1 2 3 4 + +[advanced] + val w/ pound = `+"`"+`my#password`+"`"+` + `+"`"+`longest key has a colon : yes/no`+"`"+` = yes + +`) }) } -func Benchmark_Key_Value(b *testing.B) { - c, _ := Load([]byte(_CONF_DATA)) - for i := 0; i < b.N; i++ { - c.Section("").Key("NAME").Value() +func Test_File_WriteTo_SectionRaw(t *testing.T) { + Convey("Write a INI with a raw section", t, func() { + var buf bytes.Buffer + cfg, err := LoadSources( + LoadOptions{ + UnparseableSections: []string{"CORE_LESSON", "COMMENTS"}, + }, + "testdata/aicc.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + cfg.WriteToIndent(&buf, "\t") + So(buf.String(), ShouldEqual, `[Core] + Lesson_Location = 87 + Lesson_Status = C + Score = 3 + Time = 00:02:30 + +[CORE_LESSON] +my lesson state data – 1111111111111111111000000000000000001110000 +111111111111111111100000000000111000000000 – end my lesson state data +[COMMENTS] +<1> This slide has the fuel listed in the wrong units +`) + }) +} + +// Helpers for slice tests. 
+func float64sEqual(values []float64, expected ...float64) { + So(values, ShouldHaveLength, len(expected)) + for i, v := range expected { + So(values[i], ShouldEqual, v) + } +} + +func intsEqual(values []int, expected ...int) { + So(values, ShouldHaveLength, len(expected)) + for i, v := range expected { + So(values[i], ShouldEqual, v) } } -func Benchmark_Key_String(b *testing.B) { - c, _ := Load([]byte(_CONF_DATA)) - for i := 0; i < b.N; i++ { - c.Section("").Key("NAME").String() +func int64sEqual(values []int64, expected ...int64) { + So(values, ShouldHaveLength, len(expected)) + for i, v := range expected { + So(values[i], ShouldEqual, v) } } -func Benchmark_Key_Value_NonBlock(b *testing.B) { - c, _ := Load([]byte(_CONF_DATA)) - c.BlockMode = false - for i := 0; i < b.N; i++ { - c.Section("").Key("NAME").Value() +func uintsEqual(values []uint, expected ...uint) { + So(values, ShouldHaveLength, len(expected)) + for i, v := range expected { + So(values[i], ShouldEqual, v) } } -func Benchmark_Key_String_NonBlock(b *testing.B) { - c, _ := Load([]byte(_CONF_DATA)) - c.BlockMode = false - for i := 0; i < b.N; i++ { - c.Section("").Key("NAME").String() +func uint64sEqual(values []uint64, expected ...uint64) { + So(values, ShouldHaveLength, len(expected)) + for i, v := range expected { + So(values[i], ShouldEqual, v) } } -func Benchmark_Key_SetValue(b *testing.B) { - c, _ := Load([]byte(_CONF_DATA)) - for i := 0; i < b.N; i++ { - c.Section("").Key("NAME").SetValue("10") +func timesEqual(values []time.Time, expected ...time.Time) { + So(values, ShouldHaveLength, len(expected)) + for i, v := range expected { + So(values[i].String(), ShouldEqual, v.String()) } } diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go new file mode 100644 index 00000000000..838356af01b --- /dev/null +++ b/vendor/github.com/go-ini/ini/key.go @@ -0,0 +1,699 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strconv" + "strings" + "time" +) + +// Key represents a key under a section. +type Key struct { + s *Section + name string + value string + isAutoIncrement bool + isBooleanType bool + + isShadow bool + shadows []*Key + + Comment string +} + +// newKey simply return a key object with given values. +func newKey(s *Section, name, val string) *Key { + return &Key{ + s: s, + name: name, + value: val, + } +} + +func (k *Key) addShadow(val string) error { + if k.isShadow { + return errors.New("cannot add shadow to another shadow key") + } else if k.isAutoIncrement || k.isBooleanType { + return errors.New("cannot add shadow to auto-increment or boolean key") + } + + shadow := newKey(k.s, k.name, val) + shadow.isShadow = true + k.shadows = append(k.shadows, shadow) + return nil +} + +// AddShadow adds a new shadow key to itself. 
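// A hedged sketch of shadow keys (illustrative only): repeated keys with the same
// name are kept as shadows when the file is loaded with AllowShadows set, e.g.
// via ShadowLoad:
//
//	cfg, _ := ini.ShadowLoad([]byte("[remote]\nurl = one\nurl = two"))
//	cfg.Section("remote").Key("url").ValueWithShadows() // ["one", "two"]
//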
+func (k *Key) AddShadow(val string) error { + if !k.s.f.options.AllowShadows { + return errors.New("shadow key is not allowed") + } + return k.addShadow(val) +} + +// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv +type ValueMapper func(string) string + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// ValueWithShadows returns raw values of key and its shadows if any. +func (k *Key) ValueWithShadows() []string { + if len(k.shadows) == 0 { + return []string{k.value} + } + vals := make([]string, len(k.shadows)+1) + vals[0] = k.value + for i := range k.shadows { + vals[i+1] = k.shadows[i].value + } + return vals +} + +// transformValue takes a raw value and transforms to its final string. +func (k *Key) transformValue(val string) string { + if k.s.f.ValueMapper != nil { + val = k.s.f.ValueMapper(val) + } + + // Fail-fast if no indicate char found for recursive value + if !strings.Contains(val, "%") { + return val + } + for i := 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := strings.TrimLeft(vr, "%(") + noption = strings.TrimRight(noption, ")s") + + // Search in the same section. + nk, err := k.s.GetKey(noption) + if err != nil { + // Search again in default section. + nk, _ = k.s.f.Section("").GetKey(noption) + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// String returns string representation of value. +func (k *Key) String() string { + return k.transformValue(k.value) +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. +// Any other value returns an error. +func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + return strconv.Atoi(k.String()) +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 10, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 10, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 10, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. 
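// (Referring back to the ValueMapper type above — a hedged, illustrative sketch:
// assigning os.ExpandEnv lets raw values reference environment variables.)
//
//	cfg, _ := ini.Load([]byte("foo = ${MY_VAR}")) // MY_VAR is a hypothetical variable
//	cfg.ValueMapper = os.ExpandEnv
//	cfg.Section("").Key("foo").String() // expands $MY_VAR at read time
//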
+func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + k.value = defaultVal + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatBool(defaultVal[0]) + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(int64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. 
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + // vals[i] = k.transformValue(strings.TrimSpace(vals[i])) + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// StringsWithShadows returns list of string divided by given delimiter. +// Shadows will also be appended if any. +func (k *Key) StringsWithShadows(delim string) []string { + vals := k.ValueWithShadows() + results := make([]string, 0, len(vals)*2) + for i := range vals { + if len(vals) == 0 { + continue + } + + results = append(results, strings.Split(vals[i], delim)...) + } + + for i := range results { + results[i] = k.transformValue(strings.TrimSpace(results[i])) + } + return results +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. 
+func (k *Key) ValidInts(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.parseFloat64s(k.Strings(delim), false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.parseInts(k.Strings(delim), false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.parseInt64s(k.Strings(delim), false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. +func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.parseUints(k.Strings(delim), false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.parseUint64s(k.Strings(delim), false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.parseTimesFormat(format, k.Strings(delim), false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// parseFloat64s transforms strings to float64s. +func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { + vals := make([]float64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseFloat(str, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseInts transforms strings to ints. 
+func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { + vals := make([]int, 0, len(strs)) + for _, str := range strs { + val, err := strconv.Atoi(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseInt64s transforms strings to int64s. +func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { + vals := make([]int64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseUints transforms strings to uints. +func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { + vals := make([]uint, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 0) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, uint(val)) + } + } + return vals, nil +} + +// parseUint64s transforms strings to uint64s. +func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + vals := make([]uint64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseTimesFormat transforms strings to times in given format. +func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + vals := make([]time.Time, 0, len(strs)) + for _, str := range strs { + val, err := time.Parse(format, str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/vendor/github.com/go-ini/ini/key_test.go b/vendor/github.com/go-ini/ini/key_test.go new file mode 100644 index 00000000000..1281d5bf0c6 --- /dev/null +++ b/vendor/github.com/go-ini/ini/key_test.go @@ -0,0 +1,573 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "fmt" + "strings" + "testing" + "time" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func Test_Key(t *testing.T) { + Convey("Test getting and setting values", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Get values in default section", func() { + sec := cfg.Section("") + So(sec, ShouldNotBeNil) + So(sec.Key("NAME").Value(), ShouldEqual, "ini") + So(sec.Key("NAME").String(), ShouldEqual, "ini") + So(sec.Key("NAME").Validate(func(in string) string { + return in + }), ShouldEqual, "ini") + So(sec.Key("NAME").Comment, ShouldEqual, "; Package name") + So(sec.Key("IMPORT_PATH").String(), ShouldEqual, "gopkg.in/ini.v1") + }) + + Convey("Get values in non-default section", func() { + sec := cfg.Section("author") + So(sec, ShouldNotBeNil) + So(sec.Key("NAME").String(), ShouldEqual, "Unknwon") + So(sec.Key("GITHUB").String(), ShouldEqual, "https://github.com/Unknwon") + + sec = cfg.Section("package") + So(sec, ShouldNotBeNil) + So(sec.Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + }) + + Convey("Get auto-increment key names", func() { + keys := cfg.Section("features").Keys() + for i, k := range keys { + So(k.Name(), ShouldEqual, fmt.Sprintf("#%d", i+1)) + } + }) + + Convey("Get parent-keys that are available to the child section", func() { + parentKeys := cfg.Section("package.sub").ParentKeys() + for _, k := range parentKeys { + So(k.Name(), ShouldEqual, "CLONE_URL") + } + }) + + Convey("Get overwrite value", func() { + So(cfg.Section("author").Key("E-MAIL").String(), ShouldEqual, "u@gogs.io") + }) + + Convey("Get sections", func() { + sections := cfg.Sections() + for i, name := range []string{DEFAULT_SECTION, "author", "package", "package.sub", "features", "types", "array", "note", "comments", "advance"} { + So(sections[i].Name(), ShouldEqual, name) + } + }) + + Convey("Get parent section value", func() { + So(cfg.Section("package.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + So(cfg.Section("package.fake.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + }) + + Convey("Get multiple line value", func() { + So(cfg.Section("author").Key("BIO").String(), ShouldEqual, "Gopher.\nCoding addict.\nGood man.\n") + }) + + Convey("Get values with type", func() { + sec := cfg.Section("types") + v1, err := sec.Key("BOOL").Bool() + So(err, ShouldBeNil) + So(v1, ShouldBeTrue) + + v1, err = sec.Key("BOOL_FALSE").Bool() + So(err, ShouldBeNil) + So(v1, ShouldBeFalse) + + v2, err := sec.Key("FLOAT64").Float64() + So(err, ShouldBeNil) + So(v2, ShouldEqual, 1.25) + + v3, err := sec.Key("INT").Int() + So(err, ShouldBeNil) + So(v3, ShouldEqual, 10) + + v4, err := sec.Key("INT").Int64() + So(err, ShouldBeNil) + So(v4, ShouldEqual, 10) + + v5, err := sec.Key("UINT").Uint() + So(err, ShouldBeNil) + So(v5, ShouldEqual, 3) + + v6, err := sec.Key("UINT").Uint64() + So(err, ShouldBeNil) + So(v6, ShouldEqual, 3) + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + v7, err := sec.Key("TIME").Time() + So(err, ShouldBeNil) + So(v7.String(), ShouldEqual, t.String()) + + Convey("Must get values with type", func() { + So(sec.Key("STRING").MustString("404"), ShouldEqual, "str") + So(sec.Key("BOOL").MustBool(), ShouldBeTrue) + So(sec.Key("FLOAT64").MustFloat64(), ShouldEqual, 1.25) + So(sec.Key("INT").MustInt(), ShouldEqual, 10) + So(sec.Key("INT").MustInt64(), ShouldEqual, 10) + So(sec.Key("UINT").MustUint(), ShouldEqual, 3) + So(sec.Key("UINT").MustUint64(), ShouldEqual, 3) + 
So(sec.Key("TIME").MustTime().String(), ShouldEqual, t.String()) + + dur, err := time.ParseDuration("2h45m") + So(err, ShouldBeNil) + So(sec.Key("DURATION").MustDuration().Seconds(), ShouldEqual, dur.Seconds()) + + Convey("Must get values with default value", func() { + So(sec.Key("STRING_404").MustString("404"), ShouldEqual, "404") + So(sec.Key("BOOL_404").MustBool(true), ShouldBeTrue) + So(sec.Key("FLOAT64_404").MustFloat64(2.5), ShouldEqual, 2.5) + So(sec.Key("INT_404").MustInt(15), ShouldEqual, 15) + So(sec.Key("INT64_404").MustInt64(15), ShouldEqual, 15) + So(sec.Key("UINT_404").MustUint(6), ShouldEqual, 6) + So(sec.Key("UINT64_404").MustUint64(6), ShouldEqual, 6) + + t, err := time.Parse(time.RFC3339, "2014-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME_404").MustTime(t).String(), ShouldEqual, t.String()) + + So(sec.Key("DURATION_404").MustDuration(dur).Seconds(), ShouldEqual, dur.Seconds()) + + Convey("Must should set default as key value", func() { + So(sec.Key("STRING_404").String(), ShouldEqual, "404") + So(sec.Key("BOOL_404").String(), ShouldEqual, "true") + So(sec.Key("FLOAT64_404").String(), ShouldEqual, "2.5") + So(sec.Key("INT_404").String(), ShouldEqual, "15") + So(sec.Key("INT64_404").String(), ShouldEqual, "15") + So(sec.Key("UINT_404").String(), ShouldEqual, "6") + So(sec.Key("UINT64_404").String(), ShouldEqual, "6") + So(sec.Key("TIME_404").String(), ShouldEqual, "2014-01-01T20:17:05Z") + So(sec.Key("DURATION_404").String(), ShouldEqual, "2h45m0s") + }) + }) + }) + }) + + Convey("Get value with candidates", func() { + sec := cfg.Section("types") + So(sec.Key("STRING").In("", []string{"str", "arr", "types"}), ShouldEqual, "str") + So(sec.Key("FLOAT64").InFloat64(0, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) + So(sec.Key("INT").InInt(0, []int{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("INT").InInt64(0, []int64{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("UINT").InUint(0, []uint{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("UINT").InUint64(0, []uint64{3, 6, 9}), ShouldEqual, 3) + + zt, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") + So(err, ShouldBeNil) + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME").InTime(zt, []time.Time{t, time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) + + Convey("Get value with candidates and default value", func() { + So(sec.Key("STRING_404").In("str", []string{"str", "arr", "types"}), ShouldEqual, "str") + So(sec.Key("FLOAT64_404").InFloat64(1.25, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) + So(sec.Key("INT_404").InInt(10, []int{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("INT64_404").InInt64(10, []int64{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("UINT_404").InUint(3, []uint{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("UINT_404").InUint64(3, []uint64{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("TIME_404").InTime(t, []time.Time{time.Now(), time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) + }) + }) + + Convey("Get values in range", func() { + sec := cfg.Section("types") + So(sec.Key("FLOAT64").RangeFloat64(0, 1, 2), ShouldEqual, 1.25) + So(sec.Key("INT").RangeInt(0, 10, 20), ShouldEqual, 10) + So(sec.Key("INT").RangeInt64(0, 10, 20), ShouldEqual, 10) + + minT, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") + So(err, ShouldBeNil) + midT, err := time.Parse(time.RFC3339, "2013-01-01T01:00:00Z") + So(err, ShouldBeNil) + maxT, err := time.Parse(time.RFC3339, "9999-01-01T01:00:00Z") + So(err, ShouldBeNil) + 
t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME").RangeTime(t, minT, maxT).String(), ShouldEqual, t.String()) + + Convey("Get value in range with default value", func() { + So(sec.Key("FLOAT64").RangeFloat64(5, 0, 1), ShouldEqual, 5) + So(sec.Key("INT").RangeInt(7, 0, 5), ShouldEqual, 7) + So(sec.Key("INT").RangeInt64(7, 0, 5), ShouldEqual, 7) + So(sec.Key("TIME").RangeTime(t, minT, midT).String(), ShouldEqual, t.String()) + }) + }) + + Convey("Get values into slice", func() { + sec := cfg.Section("array") + So(strings.Join(sec.Key("STRINGS").Strings(","), ","), ShouldEqual, "en,zh,de") + So(len(sec.Key("STRINGS_404").Strings(",")), ShouldEqual, 0) + + vals1 := sec.Key("FLOAT64S").Float64s(",") + float64sEqual(vals1, 1.1, 2.2, 3.3) + + vals2 := sec.Key("INTS").Ints(",") + intsEqual(vals2, 1, 2, 3) + + vals3 := sec.Key("INTS").Int64s(",") + int64sEqual(vals3, 1, 2, 3) + + vals4 := sec.Key("UINTS").Uints(",") + uintsEqual(vals4, 1, 2, 3) + + vals5 := sec.Key("UINTS").Uint64s(",") + uint64sEqual(vals5, 1, 2, 3) + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + vals6 := sec.Key("TIMES").Times(",") + timesEqual(vals6, t, t, t) + }) + + Convey("Get valid values into slice", func() { + sec := cfg.Section("array") + vals1 := sec.Key("FLOAT64S").ValidFloat64s(",") + float64sEqual(vals1, 1.1, 2.2, 3.3) + + vals2 := sec.Key("INTS").ValidInts(",") + intsEqual(vals2, 1, 2, 3) + + vals3 := sec.Key("INTS").ValidInt64s(",") + int64sEqual(vals3, 1, 2, 3) + + vals4 := sec.Key("UINTS").ValidUints(",") + uintsEqual(vals4, 1, 2, 3) + + vals5 := sec.Key("UINTS").ValidUint64s(",") + uint64sEqual(vals5, 1, 2, 3) + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + vals6 := sec.Key("TIMES").ValidTimes(",") + timesEqual(vals6, t, t, t) + }) + + Convey("Get values one type into slice of another type", func() { + sec := cfg.Section("array") + vals1 := sec.Key("STRINGS").ValidFloat64s(",") + So(vals1, ShouldBeEmpty) + + vals2 := sec.Key("STRINGS").ValidInts(",") + So(vals2, ShouldBeEmpty) + + vals3 := sec.Key("STRINGS").ValidInt64s(",") + So(vals3, ShouldBeEmpty) + + vals4 := sec.Key("STRINGS").ValidUints(",") + So(vals4, ShouldBeEmpty) + + vals5 := sec.Key("STRINGS").ValidUint64s(",") + So(vals5, ShouldBeEmpty) + + vals6 := sec.Key("STRINGS").ValidTimes(",") + So(vals6, ShouldBeEmpty) + }) + + Convey("Get valid values into slice without errors", func() { + sec := cfg.Section("array") + vals1, err := sec.Key("FLOAT64S").StrictFloat64s(",") + So(err, ShouldBeNil) + float64sEqual(vals1, 1.1, 2.2, 3.3) + + vals2, err := sec.Key("INTS").StrictInts(",") + So(err, ShouldBeNil) + intsEqual(vals2, 1, 2, 3) + + vals3, err := sec.Key("INTS").StrictInt64s(",") + So(err, ShouldBeNil) + int64sEqual(vals3, 1, 2, 3) + + vals4, err := sec.Key("UINTS").StrictUints(",") + So(err, ShouldBeNil) + uintsEqual(vals4, 1, 2, 3) + + vals5, err := sec.Key("UINTS").StrictUint64s(",") + So(err, ShouldBeNil) + uint64sEqual(vals5, 1, 2, 3) + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + vals6, err := sec.Key("TIMES").StrictTimes(",") + So(err, ShouldBeNil) + timesEqual(vals6, t, t, t) + }) + + Convey("Get invalid values into slice", func() { + sec := cfg.Section("array") + vals1, err := sec.Key("STRINGS").StrictFloat64s(",") + So(vals1, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + vals2, err := sec.Key("STRINGS").StrictInts(",") + So(vals2, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + 
vals3, err := sec.Key("STRINGS").StrictInt64s(",") + So(vals3, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + vals4, err := sec.Key("STRINGS").StrictUints(",") + So(vals4, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + vals5, err := sec.Key("STRINGS").StrictUint64s(",") + So(vals5, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + vals6, err := sec.Key("STRINGS").StrictTimes(",") + So(vals6, ShouldBeEmpty) + So(err, ShouldNotBeNil) + }) + + Convey("Get key hash", func() { + cfg.Section("").KeysHash() + }) + + Convey("Set key value", func() { + k := cfg.Section("author").Key("NAME") + k.SetValue("无闻") + So(k.String(), ShouldEqual, "无闻") + }) + + Convey("Get key strings", func() { + So(strings.Join(cfg.Section("types").KeyStrings(), ","), ShouldEqual, "STRING,BOOL,BOOL_FALSE,FLOAT64,INT,TIME,DURATION,UINT") + }) + + Convey("Delete a key", func() { + cfg.Section("package.sub").DeleteKey("UNUSED_KEY") + _, err := cfg.Section("package.sub").GetKey("UNUSED_KEY") + So(err, ShouldNotBeNil) + }) + + Convey("Has Key (backwards compatible)", func() { + sec := cfg.Section("package.sub") + haskey1 := sec.Haskey("UNUSED_KEY") + haskey2 := sec.Haskey("CLONE_URL") + haskey3 := sec.Haskey("CLONE_URL_NO") + So(haskey1, ShouldBeTrue) + So(haskey2, ShouldBeTrue) + So(haskey3, ShouldBeFalse) + }) + + Convey("Has Key", func() { + sec := cfg.Section("package.sub") + haskey1 := sec.HasKey("UNUSED_KEY") + haskey2 := sec.HasKey("CLONE_URL") + haskey3 := sec.HasKey("CLONE_URL_NO") + So(haskey1, ShouldBeTrue) + So(haskey2, ShouldBeTrue) + So(haskey3, ShouldBeFalse) + }) + + Convey("Has Value", func() { + sec := cfg.Section("author") + hasvalue1 := sec.HasValue("Unknwon") + hasvalue2 := sec.HasValue("doc") + So(hasvalue1, ShouldBeTrue) + So(hasvalue2, ShouldBeFalse) + }) + }) + + Convey("Test getting and setting bad values", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Create new key with empty name", func() { + k, err := cfg.Section("").NewKey("", "") + So(err, ShouldNotBeNil) + So(k, ShouldBeNil) + }) + + Convey("Create new section with empty name", func() { + s, err := cfg.NewSection("") + So(err, ShouldNotBeNil) + So(s, ShouldBeNil) + }) + + Convey("Create new sections with empty name", func() { + So(cfg.NewSections(""), ShouldNotBeNil) + }) + + Convey("Get section that not exists", func() { + s, err := cfg.GetSection("404") + So(err, ShouldNotBeNil) + So(s, ShouldBeNil) + + s = cfg.Section("404") + So(s, ShouldNotBeNil) + }) + }) + + Convey("Test key hash clone", t, func() { + cfg, err := Load([]byte(strings.Replace("network=tcp,addr=127.0.0.1:6379,db=4,pool_size=100,idle_timeout=180", ",", "\n", -1))) + So(err, ShouldBeNil) + for _, v := range cfg.Section("").KeysHash() { + So(len(v), ShouldBeGreaterThan, 0) + } + }) + + Convey("Key has empty value", t, func() { + _conf := `key1= +key2= ; comment` + cfg, err := Load([]byte(_conf)) + So(err, ShouldBeNil) + So(cfg.Section("").Key("key1").Value(), ShouldBeEmpty) + }) +} + +const _CONF_GIT_CONFIG = ` +[remote "origin"] + url = https://github.com/Antergone/test1.git + url = https://github.com/Antergone/test2.git +` + +func Test_Key_Shadows(t *testing.T) { + Convey("Shadows keys", t, func() { + Convey("Disable shadows", func() { + cfg, err := Load([]byte(_CONF_GIT_CONFIG)) + So(err, ShouldBeNil) + So(cfg.Section(`remote "origin"`).Key("url").String(), ShouldEqual, "https://github.com/Antergone/test2.git") + }) + + Convey("Enable shadows", func() { + cfg, err := 
ShadowLoad([]byte(_CONF_GIT_CONFIG)) + So(err, ShouldBeNil) + So(cfg.Section(`remote "origin"`).Key("url").String(), ShouldEqual, "https://github.com/Antergone/test1.git") + So(strings.Join(cfg.Section(`remote "origin"`).Key("url").ValueWithShadows(), " "), ShouldEqual, + "https://github.com/Antergone/test1.git https://github.com/Antergone/test2.git") + + Convey("Save with shadows", func() { + var buf bytes.Buffer + _, err := cfg.WriteTo(&buf) + So(err, ShouldBeNil) + So(buf.String(), ShouldEqual, `[remote "origin"] +url = https://github.com/Antergone/test1.git +url = https://github.com/Antergone/test2.git + +`) + }) + }) + }) +} + +func newTestFile(block bool) *File { + c, _ := Load([]byte(_CONF_DATA)) + c.BlockMode = block + return c +} + +func Benchmark_Key_Value(b *testing.B) { + c := newTestFile(true) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").Value() + } +} + +func Benchmark_Key_Value_NonBlock(b *testing.B) { + c := newTestFile(false) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").Value() + } +} + +func Benchmark_Key_Value_ViaSection(b *testing.B) { + c := newTestFile(true) + sec := c.Section("") + for i := 0; i < b.N; i++ { + sec.Key("NAME").Value() + } +} + +func Benchmark_Key_Value_ViaSection_NonBlock(b *testing.B) { + c := newTestFile(false) + sec := c.Section("") + for i := 0; i < b.N; i++ { + sec.Key("NAME").Value() + } +} + +func Benchmark_Key_Value_Direct(b *testing.B) { + c := newTestFile(true) + key := c.Section("").Key("NAME") + for i := 0; i < b.N; i++ { + key.Value() + } +} + +func Benchmark_Key_Value_Direct_NonBlock(b *testing.B) { + c := newTestFile(false) + key := c.Section("").Key("NAME") + for i := 0; i < b.N; i++ { + key.Value() + } +} + +func Benchmark_Key_String(b *testing.B) { + c := newTestFile(true) + for i := 0; i < b.N; i++ { + _ = c.Section("").Key("NAME").String() + } +} + +func Benchmark_Key_String_NonBlock(b *testing.B) { + c := newTestFile(false) + for i := 0; i < b.N; i++ { + _ = c.Section("").Key("NAME").String() + } +} + +func Benchmark_Key_String_ViaSection(b *testing.B) { + c := newTestFile(true) + sec := c.Section("") + for i := 0; i < b.N; i++ { + _ = sec.Key("NAME").String() + } +} + +func Benchmark_Key_String_ViaSection_NonBlock(b *testing.B) { + c := newTestFile(false) + sec := c.Section("") + for i := 0; i < b.N; i++ { + _ = sec.Key("NAME").String() + } +} + +func Benchmark_Key_SetValue(b *testing.B) { + c := newTestFile(true) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").SetValue("10") + } +} + +func Benchmark_Key_SetValue_VisSection(b *testing.B) { + c := newTestFile(true) + sec := c.Section("") + for i := 0; i < b.N; i++ { + sec.Key("NAME").SetValue("10") + } +} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go new file mode 100644 index 00000000000..69d5476273b --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser.go @@ -0,0 +1,361 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "unicode" +) + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(2) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 2 { + return nil + } + + switch { + case mask[0] == 254 && mask[1] == 255: + fallthrough + case mask[0] == 255 && mask[1] == 254: + p.buf.Read(mask) + case mask[0] == 239 && mask[1] == 187: + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } + if mask[2] == 191 { + p.buf.Read(mask) + } + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. + var keyQuote string + if line[0] == '"' { + if len(line) > 6 && string(line[0:3]) == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + endIdx := -1 + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], "=:") + if i < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, "=:") + if endIdx < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. 
+func hasSurroundedQuote(in string, quote byte) bool { + return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bool) (string, error) { + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + return "", nil + } + + var valQuote string + if len(line) > 3 && string(line[0:3]) == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + return line[startIdx : pos+startIdx], nil + } + + // Won't be able to reach here if value only contains whitespace + line = strings.TrimSpace(line) + + // Check continuation lines when desired + if !ignoreContinuation && line[len(line)-1] == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + // Check if ignore inline comment + if !ignoreInlineComment { + i := strings.IndexAny(line, "#;") + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + } + + // Trim single quotes + if hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"') { + line = line[1 : len(line)-1] + } + return line, nil +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. + section, _ := f.NewSection(DEFAULT_SECTION) + + var line []byte + var inUnparseableSection bool + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care ending line break, + // it is needed for adding second line, + // so just clean it once at the end when set to value. + p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + // TODO(unknwon): use LastIndexByte when stop supporting Go1.4 + closeIdx := bytes.LastIndex(line, []byte("]")) + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + name := string(line[1:closeIdx]) + section, err = f.NewSection(name) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset aotu-counter and comments + p.comment.Reset() + p.count = 1 + + inUnparseableSection = false + for i := range f.options.UnparseableSections { + if f.options.UnparseableSections[i] == name || + (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) { + inUnparseableSection = true + continue + } + } + continue + } + + if inUnparseableSection { + section.isRawSection = true + section.rawBody += string(line) + continue + } + + kname, offset, err := readKeyName(line) + if err != nil { + // Treat as boolean key when desired, and whole line is key name. 
+ if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys { + kname, err := p.readValue(line, f.options.IgnoreContinuation, f.options.IgnoreInlineComment) + if err != nil { + return err + } + key, err := section.NewBooleanKey(kname) + if err != nil { + return err + } + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + continue + } + return err + } + + // Auto increment. + isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + value, err := p.readValue(line[offset:], f.options.IgnoreContinuation, f.options.IgnoreInlineComment) + if err != nil { + return err + } + + key, err := section.NewKey(kname, value) + if err != nil { + return err + } + key.isAutoIncrement = isAutoIncr + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + } + return nil +} diff --git a/vendor/github.com/go-ini/ini/parser_test.go b/vendor/github.com/go-ini/ini/parser_test.go new file mode 100644 index 00000000000..05258195be7 --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser_test.go @@ -0,0 +1,42 @@ +// Copyright 2016 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func Test_BOM(t *testing.T) { + Convey("Test handling BOM", t, func() { + Convey("UTF-8-BOM", func() { + cfg, err := Load("testdata/UTF-8-BOM.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.Section("author").Key("E-MAIL").String(), ShouldEqual, "u@gogs.io") + }) + + Convey("UTF-16-LE-BOM", func() { + cfg, err := Load("testdata/UTF-16-LE-BOM.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + }) + + Convey("UTF-16-BE-BOM", func() { + }) + }) +} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go new file mode 100644 index 00000000000..94f7375ed4c --- /dev/null +++ b/vendor/github.com/go-ini/ini/section.go @@ -0,0 +1,248 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. 
+type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string + + isRawSection bool + rawBody string +} + +func newSection(f *File, name string) *Section { + return &Section{ + f: f, + name: name, + keys: make(map[string]*Key), + keyList: make([]string, 0, 10), + keysHash: make(map[string]string), + } +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// Body returns rawBody of Section if the section was marked as unparseable. +// It still follows the other rules of the INI format surrounding leading/trailing whitespace. +func (s *Section) Body() string { + return strings.TrimSpace(s.rawBody) +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } else if s.f.options.Insensitive { + name = strings.ToLower(name) + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + if s.f.options.AllowShadows { + if err := s.keys[name].addShadow(val); err != nil { + return nil, err + } + } else { + s.keys[name].value = val + } + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = newKey(s, name, val) + s.keysHash[name] = val + return s.keys[name], nil +} + +// NewBooleanKey creates a new boolean type key to given section. +func (s *Section) NewBooleanKey(name string) (*Key, error) { + key, err := s.NewKey(name, "true") + if err != nil { + return nil, err + } + + key.isBooleanType = true + return key, nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + if s.f.options.Insensitive { + name = strings.ToLower(name) + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Haskey is a backwards-compatible name for HasKey. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. 
+func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// ParentKeys returns list of keys of parent section. +func (s *Section) ParentKeys() []*Key { + var parentKeys []*Key + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + parentKeys = append(parentKeys, sec.Keys()...) + } else { + break + } + + } + return parentKeys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} + +// ChildSections returns a list of child sections of current section. +// For example, "[parent.child1]" and "[parent.child12]" are child sections +// of section "[parent]". +func (s *Section) ChildSections() []*Section { + prefix := s.name + "." + children := make([]*Section, 0, 3) + for _, name := range s.f.sectionList { + if strings.HasPrefix(name, prefix) { + children = append(children, s.f.sections[name]) + } + } + return children +} diff --git a/vendor/github.com/go-ini/ini/section_test.go b/vendor/github.com/go-ini/ini/section_test.go new file mode 100644 index 00000000000..80282c1979f --- /dev/null +++ b/vendor/github.com/go-ini/ini/section_test.go @@ -0,0 +1,75 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "strings" + "testing" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func Test_Section(t *testing.T) { + Convey("Test CRD sections", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Get section strings", func() { + So(strings.Join(cfg.SectionStrings(), ","), ShouldEqual, "DEFAULT,author,package,package.sub,features,types,array,note,comments,advance") + }) + + Convey("Delete a section", func() { + cfg.DeleteSection("") + So(cfg.SectionStrings()[0], ShouldNotEqual, DEFAULT_SECTION) + }) + + Convey("Create new sections", func() { + cfg.NewSections("test", "test2") + _, err := cfg.GetSection("test") + So(err, ShouldBeNil) + _, err = cfg.GetSection("test2") + So(err, ShouldBeNil) + }) + }) +} + +func Test_SectionRaw(t *testing.T) { + Convey("Test section raw string", t, func() { + cfg, err := LoadSources( + LoadOptions{ + Insensitive: true, + UnparseableSections: []string{"core_lesson", "comments"}, + }, + "testdata/aicc.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Get section strings", func() { + So(strings.Join(cfg.SectionStrings(), ","), ShouldEqual, "DEFAULT,core,core_lesson,comments") + }) + + Convey("Validate non-raw section", func() { + val, err := cfg.Section("core").GetKey("lesson_status") + So(err, ShouldBeNil) + So(val.String(), ShouldEqual, "C") + }) + + Convey("Validate raw section", func() { + So(cfg.Section("core_lesson").Body(), ShouldEqual, `my lesson state data – 1111111111111111111000000000000000001110000 +111111111111111111100000000000111000000000 – end my lesson state data`) + }) + }) +} \ No newline at end of file diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go index c1184371010..eeb8dabaaca 100644 --- a/vendor/github.com/go-ini/ini/struct.go +++ b/vendor/github.com/go-ini/ini/struct.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" "reflect" + "strings" "time" "unicode" ) @@ -76,10 +77,80 @@ func parseDelim(actual string) string { var reflectTime = reflect.TypeOf(time.Now()).Kind() +// setSliceWithProperType sets proper values to slice based on its type. 
+func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + var strs []string + if allowShadow { + strs = key.StringsWithShadows(delim) + } else { + strs = key.Strings(delim) + } + + numVals := len(strs) + if numVals == 0 { + return nil + } + + var vals interface{} + var err error + + sliceOf := field.Type().Elem().Kind() + switch sliceOf { + case reflect.String: + vals = strs + case reflect.Int: + vals, err = key.parseInts(strs, true, false) + case reflect.Int64: + vals, err = key.parseInt64s(strs, true, false) + case reflect.Uint: + vals, err = key.parseUints(strs, true, false) + case reflect.Uint64: + vals, err = key.parseUint64s(strs, true, false) + case reflect.Float64: + vals, err = key.parseFloat64s(strs, true, false) + case reflectTime: + vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + if isStrict { + return err + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflect.String: + slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) + case reflect.Int: + slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) + case reflect.Int64: + slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) + case reflect.Uint: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) + case reflect.Uint64: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) + case reflect.Float64: + slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) + } + } + field.Set(slice) + return nil +} + +func wrapStrictError(err error, isStrict bool) error { + if isStrict { + return err + } + return nil +} + // setWithProperType sets proper value to field based on its type, // but it does not return error for failing parsing, // because we want to use default value that is already assigned to strcut. 
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { +func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { switch t.Kind() { case reflect.String: if len(key.String()) == 0 { @@ -89,78 +160,70 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri case reflect.Bool: boolVal, err := key.Bool() if err != nil { - return nil + return wrapStrictError(err, isStrict) } field.SetBool(boolVal) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: durationVal, err := key.Duration() - if err == nil { + // Skip zero value + if err == nil && int(durationVal) > 0 { field.Set(reflect.ValueOf(durationVal)) return nil } intVal, err := key.Int64() if err != nil { - return nil + return wrapStrictError(err, isStrict) } field.SetInt(intVal) // byte is an alias for uint8, so supporting uint8 breaks support for byte case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: durationVal, err := key.Duration() - if err == nil { + // Skip zero value + if err == nil && int(durationVal) > 0 { field.Set(reflect.ValueOf(durationVal)) return nil } uintVal, err := key.Uint64() if err != nil { - return nil + return wrapStrictError(err, isStrict) } field.SetUint(uintVal) - case reflect.Float64: + case reflect.Float32, reflect.Float64: floatVal, err := key.Float64() if err != nil { - return nil + return wrapStrictError(err, isStrict) } field.SetFloat(floatVal) case reflectTime: timeVal, err := key.Time() if err != nil { - return nil + return wrapStrictError(err, isStrict) } field.Set(reflect.ValueOf(timeVal)) case reflect.Slice: - vals := key.Strings(delim) - numVals := len(vals) - if numVals == 0 { - return nil - } - - sliceOf := field.Type().Elem().Kind() - - var times []time.Time - if sliceOf == reflectTime { - times = key.Times(delim) - } - - slice := reflect.MakeSlice(field.Type(), numVals, numVals) - for i := 0; i < numVals; i++ { - switch sliceOf { - case reflectTime: - slice.Index(i).Set(reflect.ValueOf(times[i])) - default: - slice.Index(i).Set(reflect.ValueOf(vals[i])) - } - } - field.Set(slice) + return setSliceWithProperType(key, field, delim, allowShadow, isStrict) default: return fmt.Errorf("unsupported type '%s'", t) } return nil } -func (s *Section) mapTo(val reflect.Value) error { +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) { + opts := strings.SplitN(tag, ",", 3) + rawName = opts[0] + if len(opts) > 1 { + omitEmpty = opts[1] == "omitempty" + } + if len(opts) > 2 { + allowShadow = opts[2] == "allowshadow" + } + return rawName, omitEmpty, allowShadow +} + +func (s *Section) mapTo(val reflect.Value, isStrict bool) error { if val.Kind() == reflect.Ptr { val = val.Elem() } @@ -175,7 +238,8 @@ func (s *Section) mapTo(val reflect.Value) error { continue } - fieldName := s.parseFieldName(tpField.Name, tag) + rawName, _, allowShadow := parseTagOptions(tag) + fieldName := s.parseFieldName(tpField.Name, rawName) if len(fieldName) == 0 || !field.CanSet() { continue } @@ -188,7 +252,7 @@ func (s *Section) mapTo(val reflect.Value) error { if isAnonymous || isStruct { if sec, err := s.f.GetSection(fieldName); err == nil { - if err = sec.mapTo(field); err != nil { + if err = sec.mapTo(field, isStrict); err != nil { return fmt.Errorf("error mapping field(%s): %v", fieldName, err) } continue @@ -196,7 +260,8 @@ func (s *Section) mapTo(val reflect.Value) error { } if key, err := s.GetKey(fieldName); err == nil { - if err = 
setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+			delim := parseDelim(tpField.Tag.Get("delim"))
+			if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
 				return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
 			}
 		}
@@ -215,7 +280,22 @@ func (s *Section) MapTo(v interface{}) error {
 		return errors.New("cannot map to non-pointer struct")
 	}
 
-	return s.mapTo(val)
+	return s.mapTo(val, false)
+}
+
+// StrictMapTo maps section to given struct in strict mode,
+// which returns all possible errors including value parsing errors.
+func (s *Section) StrictMapTo(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot map to non-pointer struct")
+	}
+
+	return s.mapTo(val, true)
 }
 
 // MapTo maps file to given struct.
@@ -223,6 +303,12 @@ func (f *File) MapTo(v interface{}) error {
 	return f.Section("").MapTo(v)
 }
 
+// StrictMapTo maps file to given struct in strict mode,
+// which returns all possible errors including value parsing errors.
+func (f *File) StrictMapTo(v interface{}) error {
+	return f.Section("").StrictMapTo(v)
+}
+
 // MapTo maps data sources to given struct with name mapper.
 func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
 	cfg, err := Load(source, others...)
@@ -233,45 +319,104 @@ func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, other
 	return cfg.MapTo(v)
 }
 
+// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode,
+// which returns all possible errors including value parsing errors.
+func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+	cfg, err := Load(source, others...)
+	if err != nil {
+		return err
+	}
+	cfg.NameMapper = mapper
+	return cfg.StrictMapTo(v)
+}
+
 // MapTo maps data sources to given struct.
 func MapTo(v, source interface{}, others ...interface{}) error {
 	return MapToWithMapper(v, nil, source, others...)
 }
 
-// reflectWithProperType does the opposite thing with setWithProperType.
+// StrictMapTo maps data sources to given struct in strict mode,
+// which returns all possible errors including value parsing errors.
+func StrictMapTo(v, source interface{}, others ...interface{}) error {
+	return StrictMapToWithMapper(v, nil, source, others...)
+}
+
+// reflectSliceWithProperType does the opposite thing as setSliceWithProperType.
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
+	slice := field.Slice(0, field.Len())
+	if field.Len() == 0 {
+		return nil
+	}
+
+	var buf bytes.Buffer
+	sliceOf := field.Type().Elem().Kind()
+	for i := 0; i < field.Len(); i++ {
+		switch sliceOf {
+		case reflect.String:
+			buf.WriteString(slice.Index(i).String())
+		case reflect.Int, reflect.Int64:
+			buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
+		case reflect.Uint, reflect.Uint64:
+			buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
+		case reflect.Float64:
+			buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
+		case reflectTime:
+			buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
+		default:
+			return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+		}
+		buf.WriteString(delim)
+	}
+	key.SetValue(buf.String()[:buf.Len()-1])
+	return nil
+}
+
+// reflectWithProperType does the opposite thing as setWithProperType.
func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { switch t.Kind() { case reflect.String: key.SetValue(field.String()) - case reflect.Bool, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float64, - reflectTime: - key.SetValue(fmt.Sprint(field)) + case reflect.Bool: + key.SetValue(fmt.Sprint(field.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + key.SetValue(fmt.Sprint(field.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + key.SetValue(fmt.Sprint(field.Uint())) + case reflect.Float32, reflect.Float64: + key.SetValue(fmt.Sprint(field.Float())) + case reflectTime: + key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) case reflect.Slice: - vals := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - - var buf bytes.Buffer - isTime := fmt.Sprint(field.Type()) == "[]time.Time" - for i := 0; i < field.Len(); i++ { - if isTime { - buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339)) - } else { - buf.WriteString(fmt.Sprint(vals.Index(i))) - } - buf.WriteString(delim) - } - key.SetValue(buf.String()[:buf.Len()-1]) + return reflectSliceWithProperType(key, field, delim) default: return fmt.Errorf("unsupported type '%s'", t) } return nil } +// CR: copied from encoding/json/encode.go with modifications of time.Time support. +// TODO: add more test coverage. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflectTime: + t, ok := v.Interface().(time.Time) + return ok && t.IsZero() + } + return false +} + func (s *Section) reflectFrom(val reflect.Value) error { if val.Kind() == reflect.Ptr { val = val.Elem() @@ -287,13 +432,18 @@ func (s *Section) reflectFrom(val reflect.Value) error { continue } - fieldName := s.parseFieldName(tpField.Name, tag) + opts := strings.SplitN(tag, ",", 2) + if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) { + continue + } + + fieldName := s.parseFieldName(tpField.Name, opts[0]) if len(fieldName) == 0 || !field.CanSet() { continue } if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || - (tpField.Type.Kind() == reflect.Struct) { + (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { // Note: The only error here is section doesn't exist. 
sec, err := s.f.GetSection(fieldName) if err != nil { @@ -301,7 +451,7 @@ func (s *Section) reflectFrom(val reflect.Value) error { sec, _ = s.f.NewSection(fieldName) } if err = sec.reflectFrom(field); err != nil { - return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) } continue } @@ -312,7 +462,7 @@ func (s *Section) reflectFrom(val reflect.Value) error { key, _ = s.NewKey(fieldName, "") } if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { - return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) } } diff --git a/vendor/github.com/go-ini/ini/struct_test.go b/vendor/github.com/go-ini/ini/struct_test.go index d865ad78eb7..b8ba2529355 100644 --- a/vendor/github.com/go-ini/ini/struct_test.go +++ b/vendor/github.com/go-ini/ini/struct_test.go @@ -15,6 +15,8 @@ package ini import ( + "bytes" + "fmt" "strings" "testing" "time" @@ -23,10 +25,15 @@ import ( ) type testNested struct { - Cities []string `delim:"|"` - Visits []time.Time - Note string - Unused int `ini:"-"` + Cities []string `delim:"|"` + Visits []time.Time + Years []int + Numbers []int64 + Ages []uint + Populations []uint64 + Coordinates []float64 + Note string + Unused int `ini:"-"` } type testEmbeded struct { @@ -44,6 +51,9 @@ type testStruct struct { *testEmbeded `ini:"grade"` Unused int `ini:"-"` Unsigned uint + Omitted bool `ini:"omitthis,omitempty"` + Shadows []string `ini:",,allowshadow"` + ShadowInts []int `ini:"Shadows,,allowshadow"` } const _CONF_DATA_STRUCT = ` @@ -54,10 +64,18 @@ Money = 1.25 Born = 1993-10-07T20:17:05Z Duration = 2h45m Unsigned = 3 +omitthis = true +Shadows = 1, 2 +Shadows = 3, 4 [Others] Cities = HangZhou|Boston Visits = 1993-10-07T20:17:05Z, 1993-10-07T20:17:05Z +Years = 1993,1994 +Numbers = 10010,10086 +Ages = 18,19 +Populations = 12345678,98765432 +Coordinates = 192.168,10.11 Note = Hello world! 
[grade] @@ -130,6 +148,11 @@ func Test_Struct(t *testing.T) { So(strings.Join(ts.Others.Cities, ","), ShouldEqual, "HangZhou,Boston") So(ts.Others.Visits[0].String(), ShouldEqual, t.String()) + So(fmt.Sprint(ts.Others.Years), ShouldEqual, "[1993 1994]") + So(fmt.Sprint(ts.Others.Numbers), ShouldEqual, "[10010 10086]") + So(fmt.Sprint(ts.Others.Ages), ShouldEqual, "[18 19]") + So(fmt.Sprint(ts.Others.Populations), ShouldEqual, "[12345678 98765432]") + So(fmt.Sprint(ts.Others.Coordinates), ShouldEqual, "[192.168 10.11]") So(ts.Others.Note, ShouldEqual, "Hello world!") So(ts.testEmbeded.GPA, ShouldEqual, 2.8) }) @@ -168,6 +191,23 @@ func Test_Struct(t *testing.T) { So(cfg.MapTo(&unsupport4{}), ShouldNotBeNil) }) + Convey("Map to omitempty field", func() { + ts := new(testStruct) + So(MapTo(ts, []byte(_CONF_DATA_STRUCT)), ShouldBeNil) + + So(ts.Omitted, ShouldEqual, true) + }) + + Convey("Map with shadows", func() { + cfg, err := LoadSources(LoadOptions{AllowShadows: true}, []byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + ts := new(testStruct) + So(cfg.MapTo(ts), ShouldBeNil) + + So(strings.Join(ts.Shadows, " "), ShouldEqual, "1 2 3 4") + So(fmt.Sprintf("%v", ts.ShadowInts), ShouldEqual, "[1 2 3 4]") + }) + Convey("Map from invalid data source", func() { So(MapTo(&testStruct{}, "hi"), ShouldNotBeNil) }) @@ -189,33 +229,106 @@ func Test_Struct(t *testing.T) { }) }) + Convey("Map to struct in strict mode", t, func() { + cfg, err := Load([]byte(` +name=bruce +age=a30`)) + So(err, ShouldBeNil) + + type Strict struct { + Name string `ini:"name"` + Age int `ini:"age"` + } + s := new(Strict) + + So(cfg.Section("").StrictMapTo(s), ShouldNotBeNil) + }) + Convey("Reflect from struct", t, func() { type Embeded struct { - Dates []time.Time `delim:"|"` - Places []string - None []int + Dates []time.Time `delim:"|"` + Places []string + Years []int + Numbers []int64 + Ages []uint + Populations []uint64 + Coordinates []float64 + None []int } type Author struct { Name string `ini:"NAME"` Male bool Age int + Height uint GPA float64 + Date time.Time NeverMind string `ini:"-"` *Embeded `ini:"infos"` } - a := &Author{"Unknwon", true, 21, 2.8, "", + + t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z") + So(err, ShouldBeNil) + a := &Author{"Unknwon", true, 21, 100, 2.8, t, "", &Embeded{ - []time.Time{time.Now(), time.Now()}, + []time.Time{t, t}, []string{"HangZhou", "Boston"}, + []int{1993, 1994}, + []int64{10010, 10086}, + []uint{18, 19}, + []uint64{12345678, 98765432}, + []float64{192.168, 10.11}, []int{}, }} cfg := Empty() So(ReflectFrom(cfg, a), ShouldBeNil) - cfg.SaveTo("testdata/conf_reflect.ini") + + var buf bytes.Buffer + _, err = cfg.WriteTo(&buf) + So(err, ShouldBeNil) + So(buf.String(), ShouldEqual, `NAME = Unknwon +Male = true +Age = 21 +Height = 100 +GPA = 2.8 +Date = 1993-10-07T20:17:05Z + +[infos] +Dates = 1993-10-07T20:17:05Z|1993-10-07T20:17:05Z +Places = HangZhou,Boston +Years = 1993,1994 +Numbers = 10010,10086 +Ages = 18,19 +Populations = 12345678,98765432 +Coordinates = 192.168,10.11 +None = + +`) Convey("Reflect from non-point struct", func() { So(ReflectFrom(cfg, Author{}), ShouldNotBeNil) }) + + Convey("Reflect from struct with omitempty", func() { + cfg := Empty() + type SpecialStruct struct { + FirstName string `ini:"first_name"` + LastName string `ini:"last_name"` + JustOmitMe string `ini:"omitempty"` + LastLogin time.Time `ini:"last_login,omitempty"` + LastLogin2 time.Time `ini:",omitempty"` + NotEmpty int `ini:"omitempty"` + } + + So(ReflectFrom(cfg, &SpecialStruct{FirstName: 
"John", LastName: "Doe", NotEmpty: 9}), ShouldBeNil) + + var buf bytes.Buffer + _, err = cfg.WriteTo(&buf) + So(buf.String(), ShouldEqual, `first_name = John +last_name = Doe +omitempty = 9 + +`) + }) }) } diff --git a/vendor/github.com/go-openapi/analysis/.drone.sec b/vendor/github.com/go-openapi/analysis/.drone.sec deleted file mode 100644 index 769db5005ec..00000000000 --- a/vendor/github.com/go-openapi/analysis/.drone.sec +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.gU9bGHDmyTBt_nmOh7gxp_8MUeL4ckrzh3yys59Crqypnp0Rq52Q4B7T8_4u4elt-KF3jwHXfIjTWByyce8yS6FiYfKPVuCF_7qbSC5ES84Hqs0uigZP5qGFFYXC5EZlSw7giMgLrqfjbOLPhCpnTlWjfvlRssVbvWx4I6eXQEp5EaymJn2K-pgbKLaMpgkFcDUGxDeIkK9X9XQ2sYicoRPdRZgIZfMXBL05hphlCorXp1TCNAYaoI3qDmerzDGcT5YzIRKrlW2TUEy0EI4iySCbTpdlWhj3S1mt03P9Hmo8TkTfAG2Eu6XYs59RoqXCoGZNXHfsPqV5hyLpINNSzg.4VNQ5QHe2Ig7Giam.1fN53mPplTSOg0Mr8fwNH6FVjf8DNc-YyHhaET5IK4LeY0FPoyQZIjEEIqXAgzdKJ7uNfjf_dqLe2hRd-QqYjPacIHqI8FHWTqsDHC9maL3gouDxHZ3TsYVtCnO5iXrqZXpSWjDjEHKR3PQdUbBEIpEDBgkhkAN2eUHZuD1Hjy65SMWNX0eQ2CbIEgcPxHnoeGXx8k1c0VHZuXecuYYJPyGG88UWQD0aIusIR99za5cIIflT7pfvXQMuLLCcy5Y6RWkzLaNBg7R1GOZvCiOi7jmjYSKMdIxHELC4uO37n_UenvE3KWqjt9i82jbq6XMNoN6Gcwq0Qwl7PN1rtJRK6tlczm0G24Tq8t94cDLmEz2AAHdQ9T0d7rz3hS66BK4h49D_1HYoq1ZQ9lOT_Ph0TtVFjd0-_wR3k5h5A1_0azFRt_udYbn_v7-Wbga8CjGnaIpHz5hWTrutP4euorJAyyiANnOUVDeJNZYX4D-zdjT4Yoplk0mU5zo4Uo-oKDS_Nirr71uaZHcvh3jlryi0vMiDCg61CI1i1ulHeTiT65G2zDR4byOcL4cCa_vTRnmjK-2I3arPyYQfDVmXicKo8pP8RghdQ9c8ad5XkTbxOodcONU_yVqoKr8JQDuS6FNx3ck.0btSTYMPtKCu86bQsBX5Ew \ No newline at end of file diff --git a/vendor/github.com/go-openapi/analysis/.drone.yml b/vendor/github.com/go-openapi/analysis/.drone.yml deleted file mode 100644 index 6c26833116c..00000000000 --- a/vendor/github.com/go-openapi/analysis/.drone.yml +++ /dev/null @@ -1,36 +0,0 @@ -clone: - path: github.com/go-openapi/analysis - -matrix: - GO_VERSION: - - "1.6" - -build: - integration: - image: golang:$$GO_VERSION - pull: true - commands: - - go get -u github.com/stretchr/testify/assert - - go get -u gopkg.in/yaml.v2 - - go get -u github.com/go-openapi/swag - - go get -u github.com/go-openapi/jsonpointer - - go get -u github.com/go-openapi/spec - - go get -u github.com/go-openapi/loads/fmts - - go test -race ./... - - go test -v -cover -coverprofile=coverage.out -covermode=count ./... 
- -notify: - slack: - channel: bots - webhook_url: $$SLACK_URL - username: drone - -publish: - coverage: - server: https://coverage.vmware.run - token: $$GITHUB_TOKEN - # threshold: 70 - # must_increase: true - when: - matrix: - GO_VERSION: "1.6" diff --git a/vendor/github.com/go-openapi/analysis/.gitignore b/vendor/github.com/go-openapi/analysis/.gitignore deleted file mode 100644 index dd91ed6a04e..00000000000 --- a/vendor/github.com/go-openapi/analysis/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -secrets.yml -coverage.out diff --git a/vendor/github.com/go-openapi/analysis/.pullapprove.yml b/vendor/github.com/go-openapi/analysis/.pullapprove.yml deleted file mode 100644 index 4ab790edb96..00000000000 --- a/vendor/github.com/go-openapi/analysis/.pullapprove.yml +++ /dev/null @@ -1,12 +0,0 @@ -approve_by_comment: true -approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)' -reject_regex: ^[Rr]ejected -reset_on_push: false -reviewers: - members: - - casualjim - - frapposelli - - vburenin - - pytlesk4 - name: pullapprove - required: 1 diff --git a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e37..00000000000 --- a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. 
Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md deleted file mode 100644 index b6e526c0069..00000000000 --- a/vendor/github.com/go-openapi/analysis/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# OpenAPI initiative analysis [![Build Status](https://ci.vmware.run/api/badges/go-openapi/analysis/status.svg)](https://ci.vmware.run/go-openapi/analysis) [![Coverage](https://coverage.vmware.run/badges/go-openapi/analysis/coverage.svg)](https://coverage.vmware.run/go-openapi/analysis) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/analysis?status.svg)](http://godoc.org/github.com/go-openapi/analysis) - - -A foundational library to analyze an OAI specification document for easier reasoning about the content. \ No newline at end of file diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go deleted file mode 100644 index d388db3a776..00000000000 --- a/vendor/github.com/go-openapi/analysis/analyzer.go +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package analysis - -import ( - "fmt" - slashpath "path" - "strconv" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -type referenceAnalysis struct { - schemas map[string]spec.Ref - responses map[string]spec.Ref - parameters map[string]spec.Ref - items map[string]spec.Ref - allRefs map[string]spec.Ref - referenced struct { - schemas map[string]SchemaRef - responses map[string]*spec.Response - parameters map[string]*spec.Parameter - } -} - -func (r *referenceAnalysis) addRef(key string, ref spec.Ref) { - r.allRefs["#"+key] = ref -} - -func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items) { - r.items["#"+key] = items.Ref - r.addRef(key, items.Ref) -} - -func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) { - r.schemas["#"+key] = ref.Schema.Ref - r.addRef(key, ref.Schema.Ref) -} - -func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) { - r.responses["#"+key] = resp.Ref - r.addRef(key, resp.Ref) -} - -func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) { - r.parameters["#"+key] = param.Ref - r.addRef(key, param.Ref) -} - -// New takes a swagger spec object and returns an analyzed spec document. -// The analyzed document contains a number of indices that make it easier to -// reason about semantics of a swagger specification for use in code generation -// or validation etc. -func New(doc *spec.Swagger) *Spec { - a := &Spec{ - spec: doc, - consumes: make(map[string]struct{}, 150), - produces: make(map[string]struct{}, 150), - authSchemes: make(map[string]struct{}, 150), - operations: make(map[string]map[string]*spec.Operation, 150), - allSchemas: make(map[string]SchemaRef, 150), - allOfs: make(map[string]SchemaRef, 150), - references: referenceAnalysis{ - schemas: make(map[string]spec.Ref, 150), - responses: make(map[string]spec.Ref, 150), - parameters: make(map[string]spec.Ref, 150), - items: make(map[string]spec.Ref, 150), - allRefs: make(map[string]spec.Ref, 150), - }, - } - a.references.referenced.schemas = make(map[string]SchemaRef, 150) - a.references.referenced.responses = make(map[string]*spec.Response, 150) - a.references.referenced.parameters = make(map[string]*spec.Parameter, 150) - a.initialize() - return a -} - -// Spec takes a swagger spec object and turns it into a registry -// with a bunch of utility methods to act on the information in the spec -type Spec struct { - spec *spec.Swagger - consumes map[string]struct{} - produces map[string]struct{} - authSchemes map[string]struct{} - operations map[string]map[string]*spec.Operation - references referenceAnalysis - allSchemas map[string]SchemaRef - allOfs map[string]SchemaRef -} - -func (s *Spec) initialize() { - for _, c := range s.spec.Consumes { - s.consumes[c] = struct{}{} - } - for _, c := range s.spec.Produces { - s.produces[c] = struct{}{} - } - for _, ss := range s.spec.Security { - for k := range ss { - s.authSchemes[k] = struct{}{} - } - } - for path, pathItem := range s.AllPaths() { - s.analyzeOperations(path, &pathItem) - } - - for name, parameter := range s.spec.Parameters { - refPref := slashpath.Join("/parameters", jsonpointer.Escape(name)) - if parameter.Items != nil { - s.analyzeItems("items", parameter.Items, refPref) - } - if parameter.In == "body" && parameter.Schema != nil { - s.analyzeSchema("schema", *parameter.Schema, refPref) - } - } - - for name, response := range s.spec.Responses { - refPref := slashpath.Join("/responses", jsonpointer.Escape(name)) - for _, 
v := range response.Headers { - if v.Items != nil { - s.analyzeItems("items", v.Items, refPref) - } - } - if response.Schema != nil { - s.analyzeSchema("schema", *response.Schema, refPref) - } - } - - for name, schema := range s.spec.Definitions { - s.analyzeSchema(name, schema, "/definitions") - } - // TODO: after analyzing all things and flattening schemas etc - // resolve all the collected references to their final representations - // best put in a separate method because this could get expensive -} - -func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) { - // TODO: resolve refs here? - op := pi - s.analyzeOperation("GET", path, op.Get) - s.analyzeOperation("PUT", path, op.Put) - s.analyzeOperation("POST", path, op.Post) - s.analyzeOperation("PATCH", path, op.Patch) - s.analyzeOperation("DELETE", path, op.Delete) - s.analyzeOperation("HEAD", path, op.Head) - s.analyzeOperation("OPTIONS", path, op.Options) - for i, param := range op.Parameters { - refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i)) - if param.Ref.String() != "" { - s.references.addParamRef(refPref, ¶m) - } - if param.Items != nil { - s.analyzeItems("items", param.Items, refPref) - } - if param.Schema != nil { - s.analyzeSchema("schema", *param.Schema, refPref) - } - } -} - -func (s *Spec) analyzeItems(name string, items *spec.Items, prefix string) { - if items == nil { - return - } - refPref := slashpath.Join(prefix, name) - s.analyzeItems(name, items.Items, refPref) - if items.Ref.String() != "" { - s.references.addItemsRef(refPref, items) - } -} - -func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) { - if op == nil { - return - } - - for _, c := range op.Consumes { - s.consumes[c] = struct{}{} - } - for _, c := range op.Produces { - s.produces[c] = struct{}{} - } - for _, ss := range op.Security { - for k := range ss { - s.authSchemes[k] = struct{}{} - } - } - if _, ok := s.operations[method]; !ok { - s.operations[method] = make(map[string]*spec.Operation) - } - s.operations[method][path] = op - prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method)) - for i, param := range op.Parameters { - refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i)) - if param.Ref.String() != "" { - s.references.addParamRef(refPref, ¶m) - } - s.analyzeItems("items", param.Items, refPref) - if param.In == "body" && param.Schema != nil { - s.analyzeSchema("schema", *param.Schema, refPref) - } - } - if op.Responses != nil { - if op.Responses.Default != nil { - refPref := slashpath.Join(prefix, "responses", "default") - if op.Responses.Default.Ref.String() != "" { - s.references.addResponseRef(refPref, op.Responses.Default) - } - for _, v := range op.Responses.Default.Headers { - s.analyzeItems("items", v.Items, refPref) - } - if op.Responses.Default.Schema != nil { - s.analyzeSchema("schema", *op.Responses.Default.Schema, refPref) - } - } - for k, res := range op.Responses.StatusCodeResponses { - refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k)) - if res.Ref.String() != "" { - s.references.addResponseRef(refPref, &res) - } - for _, v := range res.Headers { - s.analyzeItems("items", v.Items, refPref) - } - if res.Schema != nil { - s.analyzeSchema("schema", *res.Schema, refPref) - } - } - } -} - -func (s *Spec) analyzeSchema(name string, schema spec.Schema, prefix string) { - refURI := slashpath.Join(prefix, jsonpointer.Escape(name)) - schRef := SchemaRef{ - Name: name, - Schema: &schema, - Ref: 
spec.MustCreateRef("#" + refURI), - } - s.allSchemas["#"+refURI] = schRef - if schema.Ref.String() != "" { - s.references.addSchemaRef(refURI, schRef) - } - for k, v := range schema.Definitions { - s.analyzeSchema(k, v, slashpath.Join(refURI, "definitions")) - } - for k, v := range schema.Properties { - s.analyzeSchema(k, v, slashpath.Join(refURI, "properties")) - } - for k, v := range schema.PatternProperties { - s.analyzeSchema(k, v, slashpath.Join(refURI, "patternProperties")) - } - for i, v := range schema.AllOf { - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf")) - } - if len(schema.AllOf) > 0 { - s.allOfs["#"+refURI] = SchemaRef{Name: name, Schema: &schema, Ref: spec.MustCreateRef("#" + refURI)} - } - for i, v := range schema.AnyOf { - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf")) - } - for i, v := range schema.OneOf { - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf")) - } - if schema.Not != nil { - s.analyzeSchema("not", *schema.Not, refURI) - } - if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { - s.analyzeSchema("additionalProperties", *schema.AdditionalProperties.Schema, refURI) - } - if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { - s.analyzeSchema("additionalItems", *schema.AdditionalItems.Schema, refURI) - } - if schema.Items != nil { - if schema.Items.Schema != nil { - s.analyzeSchema("items", *schema.Items.Schema, refURI) - } - for i, sch := range schema.Items.Schemas { - s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items")) - } - } -} - -// SecurityRequirement is a representation of a security requirement for an operation -type SecurityRequirement struct { - Name string - Scopes []string -} - -// SecurityRequirementsFor gets the security requirements for the operation -func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) []SecurityRequirement { - if s.spec.Security == nil && operation.Security == nil { - return nil - } - - schemes := s.spec.Security - if operation.Security != nil { - schemes = operation.Security - } - - unique := make(map[string]SecurityRequirement) - for _, scheme := range schemes { - for k, v := range scheme { - if _, ok := unique[k]; !ok { - unique[k] = SecurityRequirement{Name: k, Scopes: v} - } - } - } - - var result []SecurityRequirement - for _, v := range unique { - result = append(result, v) - } - return result -} - -// SecurityDefinitionsFor gets the matching security definitions for a set of requirements -func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme { - requirements := s.SecurityRequirementsFor(operation) - if len(requirements) == 0 { - return nil - } - result := make(map[string]spec.SecurityScheme) - for _, v := range requirements { - if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { - if definition != nil { - result[v.Name] = *definition - } - } - } - return result -} - -// ConsumesFor gets the mediatypes for the operation -func (s *Spec) ConsumesFor(operation *spec.Operation) []string { - - if len(operation.Consumes) == 0 { - cons := make(map[string]struct{}, len(s.spec.Consumes)) - for _, k := range s.spec.Consumes { - cons[k] = struct{}{} - } - return s.structMapKeys(cons) - } - - cons := make(map[string]struct{}, len(operation.Consumes)) - for _, c := range operation.Consumes { - cons[c] = struct{}{} - } - return s.structMapKeys(cons) -} - -// ProducesFor gets the mediatypes for the operation -func (s *Spec) 
ProducesFor(operation *spec.Operation) []string { - if len(operation.Produces) == 0 { - prod := make(map[string]struct{}, len(s.spec.Produces)) - for _, k := range s.spec.Produces { - prod[k] = struct{}{} - } - return s.structMapKeys(prod) - } - - prod := make(map[string]struct{}, len(operation.Produces)) - for _, c := range operation.Produces { - prod[c] = struct{}{} - } - return s.structMapKeys(prod) -} - -func mapKeyFromParam(param *spec.Parameter) string { - return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param)) -} - -func fieldNameFromParam(param *spec.Parameter) string { - if nm, ok := param.Extensions.GetString("go-name"); ok { - return nm - } - return swag.ToGoName(param.Name) -} - -func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter) { - for _, param := range parameters { - pr := param - if pr.Ref.String() != "" { - obj, _, err := pr.Ref.GetPointer().Get(s.spec) - if err != nil { - panic(err) - } - pr = obj.(spec.Parameter) - } - res[mapKeyFromParam(&pr)] = pr - } -} - -// ParametersFor the specified operation id -func (s *Spec) ParametersFor(operationID string) []spec.Parameter { - gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter { - bag := make(map[string]spec.Parameter) - s.paramsAsMap(pi.Parameters, bag) - s.paramsAsMap(op.Parameters, bag) - - var res []spec.Parameter - for _, v := range bag { - res = append(res, v) - } - return res - } - for _, pi := range s.spec.Paths.Paths { - if pi.Get != nil && pi.Get.ID == operationID { - return gatherParams(&pi, pi.Get) - } - if pi.Head != nil && pi.Head.ID == operationID { - return gatherParams(&pi, pi.Head) - } - if pi.Options != nil && pi.Options.ID == operationID { - return gatherParams(&pi, pi.Options) - } - if pi.Post != nil && pi.Post.ID == operationID { - return gatherParams(&pi, pi.Post) - } - if pi.Patch != nil && pi.Patch.ID == operationID { - return gatherParams(&pi, pi.Patch) - } - if pi.Put != nil && pi.Put.ID == operationID { - return gatherParams(&pi, pi.Put) - } - if pi.Delete != nil && pi.Delete.ID == operationID { - return gatherParams(&pi, pi.Delete) - } - } - return nil -} - -// ParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that -// apply for the method and path. 
-func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter { - res := make(map[string]spec.Parameter) - if pi, ok := s.spec.Paths.Paths[path]; ok { - s.paramsAsMap(pi.Parameters, res) - s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res) - } - return res -} - -// OperationForName gets the operation for the given id -func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) { - for method, pathItem := range s.operations { - for path, op := range pathItem { - if operationID == op.ID { - return method, path, op, true - } - } - } - return "", "", nil, false -} - -// OperationFor the given method and path -func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) { - if mp, ok := s.operations[strings.ToUpper(method)]; ok { - op, fn := mp[path] - return op, fn - } - return nil, false -} - -// Operations gathers all the operations specified in the spec document -func (s *Spec) Operations() map[string]map[string]*spec.Operation { - return s.operations -} - -func (s *Spec) structMapKeys(mp map[string]struct{}) []string { - if len(mp) == 0 { - return nil - } - - result := make([]string, 0, len(mp)) - for k := range mp { - result = append(result, k) - } - return result -} - -// AllPaths returns all the paths in the swagger spec -func (s *Spec) AllPaths() map[string]spec.PathItem { - if s.spec == nil || s.spec.Paths == nil { - return nil - } - return s.spec.Paths.Paths -} - -// OperationIDs gets all the operation ids based on method an dpath -func (s *Spec) OperationIDs() []string { - if len(s.operations) == 0 { - return nil - } - result := make([]string, 0, len(s.operations)) - for method, v := range s.operations { - for p, o := range v { - if o.ID != "" { - result = append(result, o.ID) - } else { - result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) - } - } - } - return result -} - -// RequiredConsumes gets all the distinct consumes that are specified in the specification document -func (s *Spec) RequiredConsumes() []string { - return s.structMapKeys(s.consumes) -} - -// RequiredProduces gets all the distinct produces that are specified in the specification document -func (s *Spec) RequiredProduces() []string { - return s.structMapKeys(s.produces) -} - -// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec -func (s *Spec) RequiredSecuritySchemes() []string { - return s.structMapKeys(s.authSchemes) -} - -// SchemaRef is a reference to a schema -type SchemaRef struct { - Name string - Ref spec.Ref - Schema *spec.Schema -} - -// SchemasWithAllOf returns schema references to all schemas that are defined -// with an allOf key -func (s *Spec) SchemasWithAllOf() (result []SchemaRef) { - for _, v := range s.allOfs { - result = append(result, v) - } - return -} - -// AllDefinitions returns schema references for all the definitions that were discovered -func (s *Spec) AllDefinitions() (result []SchemaRef) { - for _, v := range s.allSchemas { - result = append(result, v) - } - return -} - -// AllDefinitionReferences returns json refs for all the discovered schemas -func (s *Spec) AllDefinitionReferences() (result []string) { - for _, v := range s.references.schemas { - result = append(result, v.String()) - } - return -} - -// AllParameterReferences returns json refs for all the discovered parameters -func (s *Spec) AllParameterReferences() (result []string) { - for _, v := range s.references.parameters { - result = append(result, v.String()) - } - 
return -} - -// AllResponseReferences returns json refs for all the discovered responses -func (s *Spec) AllResponseReferences() (result []string) { - for _, v := range s.references.responses { - result = append(result, v.String()) - } - return -} - -// AllItemsReferences returns the references for all the items -func (s *Spec) AllItemsReferences() (result []string) { - for _, v := range s.references.items { - result = append(result, v.String()) - } - return -} - -// AllReferences returns all the references found in the document -func (s *Spec) AllReferences() (result []string) { - for _, v := range s.references.allRefs { - result = append(result, v.String()) - } - return -} - -// AllRefs returns all the unique references found in the document -func (s *Spec) AllRefs() (result []spec.Ref) { - set := make(map[string]struct{}) - for _, v := range s.references.allRefs { - a := v.String() - if a == "" { - continue - } - if _, ok := set[a]; !ok { - set[a] = struct{}{} - result = append(result, v) - } - } - return -} diff --git a/vendor/github.com/go-openapi/analysis/analyzer_test.go b/vendor/github.com/go-openapi/analysis/analyzer_test.go deleted file mode 100644 index d13a5955508..00000000000 --- a/vendor/github.com/go-openapi/analysis/analyzer_test.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package analysis - -import ( - "encoding/json" - "fmt" - "path/filepath" - "sort" - "testing" - - "github.com/go-openapi/loads/fmts" - "github.com/go-openapi/spec" - "github.com/stretchr/testify/assert" -) - -func schemeNames(schemes []SecurityRequirement) []string { - var names []string - for _, v := range schemes { - names = append(names, v.Name) - } - sort.Sort(sort.StringSlice(names)) - return names -} - -func TestAnalyzer(t *testing.T) { - formatParam := spec.QueryParam("format").Typed("string", "") - - limitParam := spec.QueryParam("limit").Typed("integer", "int32") - limitParam.Extensions = spec.Extensions(map[string]interface{}{}) - limitParam.Extensions.Add("go-name", "Limit") - - skipParam := spec.QueryParam("skip").Typed("integer", "int32") - pi := spec.PathItem{} - pi.Parameters = []spec.Parameter{*limitParam} - - op := &spec.Operation{} - op.Consumes = []string{"application/x-yaml"} - op.Produces = []string{"application/x-yaml"} - op.Security = []map[string][]string{ - map[string][]string{"oauth2": []string{}}, - map[string][]string{"basic": nil}, - } - op.ID = "someOperation" - op.Parameters = []spec.Parameter{*skipParam} - pi.Get = op - - pi2 := spec.PathItem{} - pi2.Parameters = []spec.Parameter{*limitParam} - op2 := &spec.Operation{} - op2.ID = "anotherOperation" - op2.Parameters = []spec.Parameter{*skipParam} - pi2.Get = op2 - - spec := &spec.Swagger{ - SwaggerProps: spec.SwaggerProps{ - Consumes: []string{"application/json"}, - Produces: []string{"application/json"}, - Security: []map[string][]string{ - map[string][]string{"apikey": nil}, - }, - SecurityDefinitions: map[string]*spec.SecurityScheme{ - "basic": spec.BasicAuth(), - "apiKey": spec.APIKeyAuth("api_key", "query"), - "oauth2": spec.OAuth2AccessToken("http://authorize.com", "http://token.com"), - }, - Parameters: map[string]spec.Parameter{"format": *formatParam}, - Paths: &spec.Paths{ - Paths: map[string]spec.PathItem{ - "/": pi, - "/items": pi2, - }, - }, - }, - } - analyzer := New(spec) - - assert.Len(t, analyzer.consumes, 2) - assert.Len(t, analyzer.produces, 2) - assert.Len(t, analyzer.operations, 1) - assert.Equal(t, analyzer.operations["GET"]["/"], spec.Paths.Paths["/"].Get) - - expected := []string{"application/x-yaml"} - sort.Sort(sort.StringSlice(expected)) - consumes := analyzer.ConsumesFor(spec.Paths.Paths["/"].Get) - sort.Sort(sort.StringSlice(consumes)) - assert.Equal(t, expected, consumes) - - produces := analyzer.ProducesFor(spec.Paths.Paths["/"].Get) - sort.Sort(sort.StringSlice(produces)) - assert.Equal(t, expected, produces) - - expected = []string{"application/json"} - sort.Sort(sort.StringSlice(expected)) - consumes = analyzer.ConsumesFor(spec.Paths.Paths["/items"].Get) - sort.Sort(sort.StringSlice(consumes)) - assert.Equal(t, expected, consumes) - - produces = analyzer.ProducesFor(spec.Paths.Paths["/items"].Get) - sort.Sort(sort.StringSlice(produces)) - assert.Equal(t, expected, produces) - - expectedSchemes := []SecurityRequirement{SecurityRequirement{"oauth2", []string{}}, SecurityRequirement{"basic", nil}} - schemes := analyzer.SecurityRequirementsFor(spec.Paths.Paths["/"].Get) - assert.Equal(t, schemeNames(expectedSchemes), schemeNames(schemes)) - - securityDefinitions := analyzer.SecurityDefinitionsFor(spec.Paths.Paths["/"].Get) - assert.Equal(t, securityDefinitions["basic"], *spec.SecurityDefinitions["basic"]) - assert.Equal(t, securityDefinitions["oauth2"], *spec.SecurityDefinitions["oauth2"]) - - parameters := analyzer.ParamsFor("GET", "/") - assert.Len(t, parameters, 2) - - 
operations := analyzer.OperationIDs() - assert.Len(t, operations, 2) - - producers := analyzer.RequiredProduces() - assert.Len(t, producers, 2) - consumers := analyzer.RequiredConsumes() - assert.Len(t, consumers, 2) - authSchemes := analyzer.RequiredSecuritySchemes() - assert.Len(t, authSchemes, 3) - - ops := analyzer.Operations() - assert.Len(t, ops, 1) - assert.Len(t, ops["GET"], 2) - - op, ok := analyzer.OperationFor("get", "/") - assert.True(t, ok) - assert.NotNil(t, op) - - op, ok = analyzer.OperationFor("delete", "/") - assert.False(t, ok) - assert.Nil(t, op) -} - -func TestDefinitionAnalysis(t *testing.T) { - doc, err := loadSpec(filepath.Join("fixtures", "definitions.yml")) - if assert.NoError(t, err) { - analyzer := New(doc) - definitions := analyzer.allSchemas - // parameters - assertSchemaRefExists(t, definitions, "#/parameters/someParam/schema") - assertSchemaRefExists(t, definitions, "#/paths/~1some~1where~1{id}/parameters/1/schema") - assertSchemaRefExists(t, definitions, "#/paths/~1some~1where~1{id}/get/parameters/1/schema") - // responses - assertSchemaRefExists(t, definitions, "#/responses/someResponse/schema") - assertSchemaRefExists(t, definitions, "#/paths/~1some~1where~1{id}/get/responses/default/schema") - assertSchemaRefExists(t, definitions, "#/paths/~1some~1where~1{id}/get/responses/200/schema") - // definitions - assertSchemaRefExists(t, definitions, "#/definitions/tag") - assertSchemaRefExists(t, definitions, "#/definitions/tag/properties/id") - assertSchemaRefExists(t, definitions, "#/definitions/tag/properties/value") - assertSchemaRefExists(t, definitions, "#/definitions/tag/definitions/category") - assertSchemaRefExists(t, definitions, "#/definitions/tag/definitions/category/properties/id") - assertSchemaRefExists(t, definitions, "#/definitions/tag/definitions/category/properties/value") - assertSchemaRefExists(t, definitions, "#/definitions/withAdditionalProps") - assertSchemaRefExists(t, definitions, "#/definitions/withAdditionalProps/additionalProperties") - assertSchemaRefExists(t, definitions, "#/definitions/withAdditionalItems") - assertSchemaRefExists(t, definitions, "#/definitions/withAdditionalItems/items/0") - assertSchemaRefExists(t, definitions, "#/definitions/withAdditionalItems/items/1") - assertSchemaRefExists(t, definitions, "#/definitions/withAdditionalItems/additionalItems") - assertSchemaRefExists(t, definitions, "#/definitions/withNot") - assertSchemaRefExists(t, definitions, "#/definitions/withNot/not") - assertSchemaRefExists(t, definitions, "#/definitions/withAnyOf") - assertSchemaRefExists(t, definitions, "#/definitions/withAnyOf/anyOf/0") - assertSchemaRefExists(t, definitions, "#/definitions/withAnyOf/anyOf/1") - assertSchemaRefExists(t, definitions, "#/definitions/withAllOf") - assertSchemaRefExists(t, definitions, "#/definitions/withAllOf/allOf/0") - assertSchemaRefExists(t, definitions, "#/definitions/withAllOf/allOf/1") - allOfs := analyzer.allOfs - assert.Len(t, allOfs, 1) - _, hasAllOf := allOfs["#/definitions/withAllOf"] - assert.True(t, hasAllOf) - } -} - -func loadSpec(path string) (*spec.Swagger, error) { - data, err := fmts.YAMLDoc(path) - if err != nil { - return nil, err - } - - var sw spec.Swagger - if err := json.Unmarshal(data, &sw); err != nil { - return nil, err - } - return &sw, nil -} - -func TestReferenceAnalysis(t *testing.T) { - doc, err := loadSpec(filepath.Join("fixtures", "references.yml")) - if assert.NoError(t, err) { - definitions := New(doc).references - - // parameters - assertRefExists(t, 
definitions.parameters, "#/paths/~1some~1where~1{id}/parameters/0") - assertRefExists(t, definitions.parameters, "#/paths/~1some~1where~1{id}/get/parameters/0") - - // responses - assertRefExists(t, definitions.responses, "#/paths/~1some~1where~1{id}/get/responses/404") - - // definitions - assertRefExists(t, definitions.schemas, "#/responses/notFound/schema") - assertRefExists(t, definitions.schemas, "#/paths/~1some~1where~1{id}/get/responses/200/schema") - assertRefExists(t, definitions.schemas, "#/definitions/tag/properties/audit") - - // items - assertRefExists(t, definitions.allRefs, "#/paths/~1some~1where~1{id}/get/parameters/1/items") - } -} - -func assertRefExists(t testing.TB, data map[string]spec.Ref, key string) bool { - if _, ok := data[key]; !ok { - return assert.Fail(t, fmt.Sprintf("expected %q to exist in the ref bag", key)) - } - return true -} - -func assertSchemaRefExists(t testing.TB, data map[string]SchemaRef, key string) bool { - if _, ok := data[key]; !ok { - return assert.Fail(t, fmt.Sprintf("expected %q to exist in schema ref bag", key)) - } - return true -} diff --git a/vendor/github.com/go-openapi/loads/.drone.sec b/vendor/github.com/go-openapi/loads/.drone.sec deleted file mode 100644 index 6d3e8439931..00000000000 --- a/vendor/github.com/go-openapi/loads/.drone.sec +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.xUjixvmMMeampw0Doyr_XLvcV5ICmDgDFmlcWqgmO84O3Hwn6dqbMkwOjpKMOyEJW_98b5Om5ED59IFt2S0T_OarlrsJL8jOK5fqxSMNXy2w8LfI-e5l1URverW41ofAVK8m9wK05q2BSJM_M6PyyODaQeDBiCVK1HreMZBlXpuUDVtBMPILQoga0eSZOsTR3DYEpZIS0A0Rsa5yIhMYR5d5-JMYqbqOCB7tNJ-BM83OzYgL7Hrz0J15kqaJmhQ-GJoMJDzOemSO9KxLCOfSPp11R_G3Mfd48xYnuiRuPOTakbOCLxuYviH6uoGVIOhnMyY9qKiDKbOn4BQUi1-igA.6qjQzq9nzAxRRKV_.z79R5cMFAEuEaAh6U9ykiL8oIqzMbs_I2C-hSFRh3HYRJ4fTB-9LrcbF0uASIOq7bBn4OQzW-0QFwYOs1uaawmrByGngV5d0afiZf_LBKcmTF2vtxRi_A_nxD-EHoPmh3lKBU5WNDe_8kLjEeS89HeyyFPuv5iQbqhzdqPFohHKVigwVqVYYLjB8GWQ4t7tC4c8l5rHanaXf71W0e3op2m8bebpZL0JPGhnULVA1oU27TYeLsO112JkIYtBwZxzvAs--bBFoKeGJWVMFzrKN68UACGZ9RFw0uGJbBmVC4-jRuIc6XpqeEqw3KG-rjFzkeEor3575qW-8kiXYqpub9SFUc3SSZkxJ8hB3SrnMBOuDUSenrXNpAbltmV3KAALzN3_bMBQuihwSRIn0Hg7-Dpni8BieMe44RMDvRu6p_71aeU_KW4V7Umy_h8gpIvQFuKGdTQH2ahsyCXL0ojqjMbVMdoWpDQTQ2_Fy8Qt_p2kJ8BgDo-1Akd4a6BNU2NGqsdnrJmtVKcTqLBadf9ylCwxHdGVrtNYORALSms2T6Q1s-poQnMjIwN8lnUD8ABUBpt4uVtrYkiWPVwrwywLQeiHhR-pboe_53kWDAx4Hy4rpbKsaxanYhy_bEbAYKb3aIUA.75GD4kRBCQdcGFYP1QYdCg \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/.drone.yml b/vendor/github.com/go-openapi/loads/.drone.yml deleted file mode 100644 index 98229103538..00000000000 --- a/vendor/github.com/go-openapi/loads/.drone.yml +++ /dev/null @@ -1,39 +0,0 @@ -clone: - path: github.com/go-openapi/loads - -matrix: - GO_VERSION: - - "1.6" - -build: - integration: - image: golang:$$GO_VERSION - pull: true - environment: - GOCOVMODE: "count" - commands: - - go get -u github.com/axw/gocov/gocov - - go get -u gopkg.in/matm/v1/gocov-html - - go get -u github.com/cee-dub/go-junit-report - - go get -u github.com/stretchr/testify/assert - - go get -u gopkg.in/yaml.v2 - - go get -u github.com/go-openapi/swag - - go get -u github.com/go-openapi/analysis - - go get -u github.com/go-openapi/spec - - ./hack/build-drone.sh - -notify: - slack: - channel: bots - webhook_url: $$SLACK_URL - username: drone - -publish: - coverage: - server: https://coverage.vmware.run - token: $$GITHUB_TOKEN - # threshold: 70 - # must_increase: true - when: - matrix: - GO_VERSION: "1.6" diff --git 
a/vendor/github.com/go-openapi/loads/.gitignore b/vendor/github.com/go-openapi/loads/.gitignore deleted file mode 100644 index e4f15f17bfc..00000000000 --- a/vendor/github.com/go-openapi/loads/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -secrets.yml -coverage.out -profile.cov -profile.out diff --git a/vendor/github.com/go-openapi/loads/.pullapprove.yml b/vendor/github.com/go-openapi/loads/.pullapprove.yml deleted file mode 100644 index 5ec183e2244..00000000000 --- a/vendor/github.com/go-openapi/loads/.pullapprove.yml +++ /dev/null @@ -1,13 +0,0 @@ -approve_by_comment: true -approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)' -reject_regex: ^[Rr]ejected -reset_on_push: false -reviewers: - members: - - casualjim - - chancez - - frapposelli - - vburenin - - pytlesk4 - name: pullapprove - required: 1 diff --git a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e37..00000000000 --- a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. 
- -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md deleted file mode 100644 index 9d5c899970e..00000000000 --- a/vendor/github.com/go-openapi/loads/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Loads OAI specs [![Build Status](https://ci.vmware.run/api/badges/go-openapi/loads/status.svg)](https://ci.vmware.run/go-openapi/loads) [![Coverage](https://coverage.vmware.run/badges/go-openapi/loads/coverage.svg)](https://coverage.vmware.run/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) - -Loading of OAI specification documents from local or remote locations. diff --git a/vendor/github.com/go-openapi/loads/json_test.go b/vendor/github.com/go-openapi/loads/json_test.go deleted file mode 100644 index 8b60eb19f4d..00000000000 --- a/vendor/github.com/go-openapi/loads/json_test.go +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package loads - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestLoadJSON(t *testing.T) { - serv := httptest.NewServer(http.HandlerFunc(jsonPestoreServer)) - defer serv.Close() - - s, err := JSONSpec(serv.URL) - assert.NoError(t, err) - assert.NotNil(t, s) - - ts2 := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(http.StatusNotFound) - rw.Write([]byte("{}")) - })) - defer ts2.Close() - _, err = JSONSpec(ts2.URL) - assert.Error(t, err) -} - -var jsonPestoreServer = func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(http.StatusOK) - rw.Write([]byte(petstoreJSON)) -} - -const petstoreJSON = `{ - "swagger": "2.0", - "info": { - "version": "1.0.0", - "title": "Swagger Petstore", - "contact": { - "name": "Wordnik API Team", - "url": "http://developer.wordnik.com" - }, - "license": { - "name": "Creative Commons 4.0 International", - "url": "http://creativecommons.org/licenses/by/4.0/" - } - }, - "host": "petstore.swagger.wordnik.com", - "basePath": "/api", - "schemes": [ - "http" - ], - "paths": { - "/pets": { - "get": { - "security": [ - { - "oauth2": ["read"] - } - ], - "tags": [ "Pet Operations" ], - "operationId": "getAllPets", - "parameters": [ - { - "name": "status", - "in": "query", - "description": "The status to filter by", - "type": "string" - } - ], - "summary": "Finds all pets in the system", - "responses": { - "200": { - "description": "Pet response", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Pet" - } - } - }, - "default": { - "description": "Unexpected error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "post": { - "security": [ - { - "oauth2": ["write"] - } - ], - "tags": [ "Pet Operations" ], - "operationId": "createPet", - "summary": "Creates a new pet", - "parameters": [ - { - "name": "pet", - "in": "body", - "description": "The Pet to create", - "required": true, - "schema": { - "$ref": "#/definitions/newPet" - } - } - ], - "responses": { - "200": { - "description": "Created Pet response", - "schema": { - "$ref": "#/definitions/Pet" - } - }, - "default": { - "description": "Unexpected error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/pets/{id}": { - "delete": { - "security": [ - { - "oauth2": ["write"] - } - ], - "description": "Deletes the Pet by id", - "operationId": "deletePet", - "parameters": [ - { - "name": "id", - "in": "path", - "description": "ID of pet to delete", - "required": true, - "type": "integer", - "format": "int64" - } - ], - "responses": { - "204": { - "description": "pet deleted" - }, - "default": { - "description": "unexpected error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "get": { - "security": [ - { - "oauth2": ["read"] - } - ], - "tags": [ "Pet Operations" ], - "operationId": "getPetById", - "summary": "Finds the pet by id", - "responses": { - "200": { - "description": "Pet response", - "schema": { - "$ref": "#/definitions/Pet" - } - }, - "default": { - "description": "Unexpected error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "parameters": [ - { - "name": "id", - "in": "path", - "description": "ID of pet", - "required": true, - "type": "integer", - "format": "int64" - } - ] - } - }, - "definitions": { - "Category": { - "id": "Category", - "properties": { - "id": { - "format": "int64", - "type": "integer" - }, - "name": { - "type": "string" - } - } - }, - "Pet": { - "id": "Pet", - 
"properties": { - "category": { - "$ref": "#/definitions/Category" - }, - "id": { - "description": "unique identifier for the pet", - "format": "int64", - "maximum": 100.0, - "minimum": 0.0, - "type": "integer" - }, - "name": { - "type": "string" - }, - "photoUrls": { - "items": { - "type": "string" - }, - "type": "array" - }, - "status": { - "description": "pet status in the store", - "enum": [ - "available", - "pending", - "sold" - ], - "type": "string" - }, - "tags": { - "items": { - "$ref": "#/definitions/Tag" - }, - "type": "array" - } - }, - "required": [ - "id", - "name" - ] - }, - "newPet": { - "allOf": [ - { - "$ref": "#/definitions/Pet" - } - ], - "required": [ - "name" - ] - }, - "Tag": { - "id": "Tag", - "properties": { - "id": { - "format": "int64", - "type": "integer" - }, - "name": { - "type": "string" - } - } - }, - "Error": { - "required": [ - "code", - "message" - ], - "properties": { - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - } - } - } - }, - "produces": [ - "application/json", - "application/xml", - "text/plain", - "text/html" - ], - "securityDefinitions": { - "oauth2": { - "type": "oauth2", - "scopes": { - "read": "Read access.", - "write": "Write access" - }, - "flow": "accessCode", - "authorizationUrl": "http://petstore.swagger.wordnik.com/oauth/authorize", - "tokenUrl": "http://petstore.swagger.wordnik.com/oauth/token" - } - } -}` diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go deleted file mode 100644 index ff1ee1c9faa..00000000000 --- a/vendor/github.com/go-openapi/loads/spec.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package loads - -import ( - "encoding/json" - "fmt" - "net/url" - - "github.com/go-openapi/analysis" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -// JSONDoc loads a json document from either a file or a remote url -func JSONDoc(path string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil -} - -// DocLoader represents a doc loader type -type DocLoader func(string) (json.RawMessage, error) - -// DocMatcher represents a predicate to check if a loader matches -type DocMatcher func(string) bool - -var loaders = &loader{Match: func(_ string) bool { return true }, Fn: JSONDoc} - -// AddLoader for a document -func AddLoader(predicate DocMatcher, load DocLoader) { - prev := loaders - loaders = &loader{ - Match: predicate, - Fn: load, - Next: prev, - } - -} - -type loader struct { - Fn DocLoader - Match DocMatcher - Next *loader -} - -// JSONSpec loads a spec from a json document -func JSONSpec(path string) (*Document, error) { - data, err := JSONDoc(path) - if err != nil { - return nil, err - } - // convert to json - return Analyzed(json.RawMessage(data), "") -} - -// Document represents a swagger spec document -type Document struct { - // specAnalyzer - Analyzer *analysis.Spec - spec *spec.Swagger - origSpec *spec.Swagger - schema *spec.Schema - raw json.RawMessage -} - -// Spec loads a new spec document -func Spec(path string) (*Document, error) { - specURL, err := url.Parse(path) - if err != nil { - return nil, err - } - for l := loaders.Next; l != nil; l = l.Next { - if loaders.Match(specURL.Path) { - b, err2 := loaders.Fn(path) - if err2 != nil { - return nil, err2 - } - return Analyzed(b, "") - } - } - b, err := loaders.Fn(path) - if err != nil { - return nil, err - } - return Analyzed(b, "") -} - -var swag20Schema = spec.MustLoadSwagger20Schema() - -// Analyzed creates a new analyzed spec document -func Analyzed(data json.RawMessage, version string) (*Document, error) { - if version == "" { - version = "2.0" - } - if version != "2.0" { - return nil, fmt.Errorf("spec version %q is not supported", version) - } - - swspec := new(spec.Swagger) - if err := json.Unmarshal(data, swspec); err != nil { - return nil, err - } - - origsqspec := new(spec.Swagger) - if err := json.Unmarshal(data, origsqspec); err != nil { - return nil, err - } - - d := &Document{ - Analyzer: analysis.New(swspec), - schema: swag20Schema, - spec: swspec, - raw: data, - origSpec: origsqspec, - } - return d, nil -} - -// Expanded expands the ref fields in the spec document and returns a new spec document -func (d *Document) Expanded() (*Document, error) { - swspec := new(spec.Swagger) - if err := json.Unmarshal(d.raw, swspec); err != nil { - return nil, err - } - if err := spec.ExpandSpec(swspec); err != nil { - return nil, err - } - - dd := &Document{ - Analyzer: analysis.New(swspec), - spec: swspec, - schema: swag20Schema, - raw: d.raw, - origSpec: d.origSpec, - } - return dd, nil -} - -// BasePath the base path for this spec -func (d *Document) BasePath() string { - return d.spec.BasePath -} - -// Version returns the version of this spec -func (d *Document) Version() string { - return d.spec.Swagger -} - -// Schema returns the swagger 2.0 schema -func (d *Document) Schema() *spec.Schema { - return d.schema -} - -// Spec returns the swagger spec object model -func (d *Document) Spec() *spec.Swagger { - return d.spec -} - -// Host returns the host for the API -func (d *Document) Host() string { - 
return d.spec.Host -} - -// Raw returns the raw swagger spec as json bytes -func (d *Document) Raw() json.RawMessage { - return d.raw -} - -func (d *Document) OrigSpec() *spec.Swagger { - return d.origSpec -} - -// ResetDefinitions gives a shallow copy with the models reset -func (d *Document) ResetDefinitions() *Document { - defs := make(map[string]spec.Schema, len(d.origSpec.Definitions)) - for k, v := range d.origSpec.Definitions { - defs[k] = v - } - - d.spec.Definitions = defs - return d -} - -// Pristine creates a new pristine document instance based on the input data -func (d *Document) Pristine() *Document { - dd, _ := Analyzed(d.Raw(), d.Version()) - return dd -} diff --git a/vendor/github.com/go-openapi/loads/spec_test.go b/vendor/github.com/go-openapi/loads/spec_test.go deleted file mode 100644 index 582e0802efe..00000000000 --- a/vendor/github.com/go-openapi/loads/spec_test.go +++ /dev/null @@ -1,339 +0,0 @@ -package loads - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestUnknownSpecVersion(t *testing.T) { - _, err := Analyzed([]byte{}, "0.9") - assert.Error(t, err) -} - -func TestDefaultsTo20(t *testing.T) { - d, err := Analyzed(PetStoreJSONMessage, "") - - assert.NoError(t, err) - assert.NotNil(t, d) - assert.Equal(t, "2.0", d.Version()) - // assert.Equal(t, "2.0", d.data["swagger"].(string)) - assert.Equal(t, "/api", d.BasePath()) -} - -// func TestValidatesValidSchema(t *testing.T) { -// d, err := New(testingutil.PetStoreJSONMessage, "") - -// assert.NoError(t, err) -// assert.NotNil(t, d) -// res := d.Validate() -// assert.NotNil(t, res) -// assert.True(t, res.Valid()) -// assert.Empty(t, res.Errors()) - -// } - -// func TestFailsInvalidSchema(t *testing.T) { -// d, err := New(testingutil.InvalidJSONMessage, "") - -// assert.NoError(t, err) -// assert.NotNil(t, d) - -// res := d.Validate() -// assert.NotNil(t, res) -// assert.False(t, res.Valid()) -// assert.NotEmpty(t, res.Errors()) -// } - -func TestFailsInvalidJSON(t *testing.T) { - _, err := Analyzed(json.RawMessage([]byte("{]")), "") - - assert.Error(t, err) -} - -// PetStoreJSONMessage json raw message for Petstore20 -var PetStoreJSONMessage = json.RawMessage([]byte(PetStore20)) - -// PetStore20 json doc for swagger 2.0 pet store -const PetStore20 = `{ - "swagger": "2.0", - "info": { - "version": "1.0.0", - "title": "Swagger Petstore", - "contact": { - "name": "Wordnik API Team", - "url": "http://developer.wordnik.com" - }, - "license": { - "name": "Creative Commons 4.0 International", - "url": "http://creativecommons.org/licenses/by/4.0/" - } - }, - "host": "petstore.swagger.wordnik.com", - "basePath": "/api", - "schemes": [ - "http" - ], - "paths": { - "/pets": { - "get": { - "security": [ - { - "basic": [] - } - ], - "tags": [ "Pet Operations" ], - "operationId": "getAllPets", - "parameters": [ - { - "name": "status", - "in": "query", - "description": "The status to filter by", - "type": "string" - }, - { - "name": "limit", - "in": "query", - "description": "The maximum number of results to return", - "type": "integer", - "format": "int64" - } - ], - "summary": "Finds all pets in the system", - "responses": { - "200": { - "description": "Pet response", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Pet" - } - } - }, - "default": { - "description": "Unexpected error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "post": { - "security": [ - { - "basic": [] - } - ], - "tags": [ "Pet Operations" ], - "operationId": "createPet", 
- "summary": "Creates a new pet", - "consumes": ["application/x-yaml"], - "produces": ["application/x-yaml"], - "parameters": [ - { - "name": "pet", - "in": "body", - "description": "The Pet to create", - "required": true, - "schema": { - "$ref": "#/definitions/newPet" - } - } - ], - "responses": { - "200": { - "description": "Created Pet response", - "schema": { - "$ref": "#/definitions/Pet" - } - }, - "default": { - "description": "Unexpected error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - } - }, - "/pets/{id}": { - "delete": { - "security": [ - { - "apiKey": [] - } - ], - "description": "Deletes the Pet by id", - "operationId": "deletePet", - "parameters": [ - { - "name": "id", - "in": "path", - "description": "ID of pet to delete", - "required": true, - "type": "integer", - "format": "int64" - } - ], - "responses": { - "204": { - "description": "pet deleted" - }, - "default": { - "description": "unexpected error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "get": { - "tags": [ "Pet Operations" ], - "operationId": "getPetById", - "summary": "Finds the pet by id", - "responses": { - "200": { - "description": "Pet response", - "schema": { - "$ref": "#/definitions/Pet" - } - }, - "default": { - "description": "Unexpected error", - "schema": { - "$ref": "#/definitions/Error" - } - } - } - }, - "parameters": [ - { - "name": "id", - "in": "path", - "description": "ID of pet", - "required": true, - "type": "integer", - "format": "int64" - } - ] - } - }, - "definitions": { - "Category": { - "id": "Category", - "properties": { - "id": { - "format": "int64", - "type": "integer" - }, - "name": { - "type": "string" - } - } - }, - "Pet": { - "id": "Pet", - "properties": { - "category": { - "$ref": "#/definitions/Category" - }, - "id": { - "description": "unique identifier for the pet", - "format": "int64", - "maximum": 100.0, - "minimum": 0.0, - "type": "integer" - }, - "name": { - "type": "string" - }, - "photoUrls": { - "items": { - "type": "string" - }, - "type": "array" - }, - "status": { - "description": "pet status in the store", - "enum": [ - "available", - "pending", - "sold" - ], - "type": "string" - }, - "tags": { - "items": { - "$ref": "#/definitions/Tag" - }, - "type": "array" - } - }, - "required": [ - "id", - "name" - ] - }, - "newPet": { - "anyOf": [ - { - "$ref": "#/definitions/Pet" - }, - { - "required": [ - "name" - ] - } - ] - }, - "Tag": { - "id": "Tag", - "properties": { - "id": { - "format": "int64", - "type": "integer" - }, - "name": { - "type": "string" - } - } - }, - "Error": { - "required": [ - "code", - "message" - ], - "properties": { - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - } - } - } - }, - "consumes": [ - "application/json", - "application/xml" - ], - "produces": [ - "application/json", - "application/xml", - "text/plain", - "text/html" - ], - "securityDefinitions": { - "basic": { - "type": "basic" - }, - "apiKey": { - "type": "apiKey", - "in": "header", - "name": "X-API-KEY" - } - } -} -` diff --git a/vendor/github.com/golang/glog/README b/vendor/github.com/golang/glog/README index 5f9c11485e0..387b4eb6890 100644 --- a/vendor/github.com/golang/glog/README +++ b/vendor/github.com/golang/glog/README @@ -5,7 +5,7 @@ Leveled execution logs for Go. 
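For context, the glog README changed here describes leveled, flag-controlled logging, and the glog.go hunk further below adds a stderr fallback for messages emitted before flag.Parse. A minimal sketch of the normal usage this assumes (glog's standard entry points Info, Warningf, V and Flush are taken from the library's documentation, not from this excerpt):

    package main

    import (
        "flag"

        "github.com/golang/glog"
    )

    func main() {
        // glog reads its flags (-logtostderr, -v, -vmodule, ...) via the
        // standard flag package, so flag.Parse should run before logging.
        flag.Parse()
        defer glog.Flush()

        glog.Info("always recorded at severity INFO")
        glog.Warningf("warning with %d argument(s)", 1)

        // V(2) is cheap to test; the call inside only formats its
        // arguments when -v=2 (or higher) is in effect.
        if glog.V(2) {
            glog.Info("verbose diagnostic output")
        }
    }

The glog.go change below keeps messages visible even when a program logs before parsing its flags, which previously could drop output silently.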
This is an efficient pure Go implementation of leveled logs in the manner of the open source C++ package - http://code.google.com/p/google-glog + https://github.com/google/glog By binding methods to booleans it is possible to use the log package without paying the expense of evaluating the arguments to the log. diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go index 3e63fffd5ec..54bd7afdcab 100644 --- a/vendor/github.com/golang/glog/glog.go +++ b/vendor/github.com/golang/glog/glog.go @@ -676,7 +676,10 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo } } data := buf.Bytes() - if l.toStderr { + if !flag.Parsed() { + os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) + os.Stderr.Write(data) + } else if l.toStderr { os.Stderr.Write(data) } else { if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 00000000000..89e07ae192a --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,136 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. 
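The Any helpers introduced below (MarshalAny, UnmarshalAny, Is, AnyMessageName) are typically combined as in this sketch; the FileDescriptorProto payload is purely illustrative, chosen only because the accompanying test file packs the same message type:

    package main

    import (
        "fmt"
        "log"

        "github.com/golang/protobuf/proto"
        pb "github.com/golang/protobuf/protoc-gen-go/descriptor"
        "github.com/golang/protobuf/ptypes"
    )

    func main() {
        // Pack a concrete message into a google.protobuf.Any.
        msg := &pb.FileDescriptorProto{Name: proto.String("example.proto")}
        a, err := ptypes.MarshalAny(msg)
        if err != nil {
            log.Fatal(err)
        }

        // Is answers the common "is this an X?" question.
        if ptypes.Is(a, &pb.FileDescriptorProto{}) {
            // AnyMessageName is for the rarer case of inspecting or
            // filtering by type name without unpacking the payload.
            name, _ := ptypes.AnyMessageName(a)
            fmt.Println("packed type:", name)

            var out pb.FileDescriptorProto
            if err := ptypes.UnmarshalAny(a, &out); err != nil {
                log.Fatal(err)
            }
            fmt.Println("round-tripped name:", out.GetName())
        }
    }
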
+func AnyMessageName(any *any.Any) (string, error) { + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. +func MarshalAny(pb proto.Message) (*any.Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. +func Empty(any *any.Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. +// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *any.Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = Empty(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *any.Any, pb proto.Message) bool { + aname, err := AnyMessageName(any) + if err != nil { + return false + } + + return aname == proto.MessageName(pb) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 00000000000..f2c6906b91b --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,155 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/any/any.proto +// DO NOT EDIT! + +/* +Package any is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/any/any.proto + +It has these top-level messages: + Any +*/ +package any + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
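When the concrete type is not known at the call site, the DynamicAny path documented above lets UnmarshalAny allocate the right message, provided that type is linked into the binary. A small sketch under that assumption (describeAny and the package name are illustrative, not part of the patch):

    package ptypesutil // illustrative package name

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        "github.com/golang/protobuf/ptypes"
        anypb "github.com/golang/protobuf/ptypes/any"
    )

    // describeAny unpacks an Any whose concrete Go type is unknown to the
    // caller. It assumes the packed message type is linked into the binary.
    func describeAny(a *anypb.Any) (proto.Message, error) {
        var dyn ptypes.DynamicAny
        if err := ptypes.UnmarshalAny(a, &dyn); err != nil {
            // Covers both "type not linked in" (Empty fails internally) and
            // a payload that does not unmarshal into the allocated message.
            return nil, fmt.Errorf("cannot unpack %q: %v", a.TypeUrl, err)
        }
        // dyn.Message is a freshly allocated message of the packed type.
        return dyn.Message, nil
    }
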
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Any) XXX_WellKnownType() string { return "Any" } + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 187 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc, + 0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c, + 0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69, + 0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24, + 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1, + 0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19, + 0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, + 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9, + 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00, + 0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto new file mode 100644 index 00000000000..81dcf46ccfd --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -0,0 +1,140 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/any"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. 
+ // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any_test.go b/vendor/github.com/golang/protobuf/ptypes/any_test.go new file mode 100644 index 00000000000..ed675b489ca --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any_test.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package ptypes + +import ( + "testing" + + "github.com/golang/protobuf/proto" + pb "github.com/golang/protobuf/protoc-gen-go/descriptor" + "github.com/golang/protobuf/ptypes/any" +) + +func TestMarshalUnmarshal(t *testing.T) { + orig := &any.Any{Value: []byte("test")} + + packed, err := MarshalAny(orig) + if err != nil { + t.Errorf("MarshalAny(%+v): got: _, %v exp: _, nil", orig, err) + } + + unpacked := &any.Any{} + err = UnmarshalAny(packed, unpacked) + if err != nil || !proto.Equal(unpacked, orig) { + t.Errorf("got: %v, %+v; want nil, %+v", err, unpacked, orig) + } +} + +func TestIs(t *testing.T) { + a, err := MarshalAny(&pb.FileDescriptorProto{}) + if err != nil { + t.Fatal(err) + } + if Is(a, &pb.DescriptorProto{}) { + t.Error("FileDescriptorProto is not a DescriptorProto, but Is says it is") + } + if !Is(a, &pb.FileDescriptorProto{}) { + t.Error("FileDescriptorProto is indeed a FileDescriptorProto, but Is says it is not") + } +} + +func TestIsDifferentUrlPrefixes(t *testing.T) { + m := &pb.FileDescriptorProto{} + a := &any.Any{TypeUrl: "foo/bar/" + proto.MessageName(m)} + if !Is(a, m) { + t.Errorf("message with type url %q didn't satisfy Is for type %q", a.TypeUrl, proto.MessageName(m)) + } +} + +func TestUnmarshalDynamic(t *testing.T) { + want := &pb.FileDescriptorProto{Name: proto.String("foo")} + a, err := MarshalAny(want) + if err != nil { + t.Fatal(err) + } + var got DynamicAny + if err := UnmarshalAny(a, &got); err != nil { + t.Fatal(err) + } + if !proto.Equal(got.Message, want) { + t.Errorf("invalid result from UnmarshalAny, got %q want %q", got.Message, want) + } +} + +func TestEmpty(t *testing.T) { + want := &pb.FileDescriptorProto{} + a, err := MarshalAny(want) + if err != nil { + t.Fatal(err) + } + got, err := Empty(a) + if err != nil { + t.Fatal(err) + } + if !proto.Equal(got, want) { + t.Errorf("unequal empty message, got %q, want %q", got, want) + } + + // that's a valid type_url for a message which shouldn't be linked into this + // test binary. We want an error. + a.TypeUrl = "type.googleapis.com/google.protobuf.FieldMask" + if _, err := Empty(a); err == nil { + t.Errorf("got no error for an attempt to create a message of type %q, which shouldn't be linked in", a.TypeUrl) + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 00000000000..c0d595da7ab --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package ptypes contains code for interacting with well-known types. +*/ +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 00000000000..65cb0f8eb5f --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,102 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + // Range of a durpb.Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the durpb.Duration is valid according to the +// definition in google/protobuf/duration.proto. 
A valid durpb.Duration +// may still be too large to fit into a time.Duration (the range of durpb.Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *durpb.Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) + } + return nil +} + +// Duration converts a durpb.Duration to a time.Duration. Duration +// returns an error if the durpb.Duration is invalid or is too large to be +// represented in a time.Duration. +func Duration(p *durpb.Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durpb.Duration. +func DurationProto(d time.Duration) *durpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durpb.Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 00000000000..569748346d4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,114 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/duration/duration.proto +// DO NOT EDIT! + +/* +Package duration is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/duration/duration.proto + +It has these top-level messages: + Duration +*/ +package duration + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// Example 1: Compute Duration from two Timestamps in pseudo code. 
+// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Duration) XXX_WellKnownType() string { return "Duration" } + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 189 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29, + 0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, + 0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8, + 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60, + 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6, + 0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98, + 0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xbb, 0x80, 0x91, 0x71, 0x11, 0x13, + 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9, + 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0xfb, 0xb1, 0x51, 0x0e, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto new file mode 100644 index 00000000000..96c1796d659 --- /dev/null +++ 
b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto @@ -0,0 +1,98 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/duration"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. 
+// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration_test.go b/vendor/github.com/golang/protobuf/ptypes/duration_test.go new file mode 100644 index 00000000000..e761289f12c --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration_test.go @@ -0,0 +1,121 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +import ( + "math" + "testing" + "time" + + "github.com/golang/protobuf/proto" + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + minGoSeconds = math.MinInt64 / int64(1e9) + maxGoSeconds = math.MaxInt64 / int64(1e9) +) + +var durationTests = []struct { + proto *durpb.Duration + isValid bool + inRange bool + dur time.Duration +}{ + // The zero duration. + {&durpb.Duration{0, 0}, true, true, 0}, + // Some ordinary non-zero durations. 
+ {&durpb.Duration{100, 0}, true, true, 100 * time.Second}, + {&durpb.Duration{-100, 0}, true, true, -100 * time.Second}, + {&durpb.Duration{100, 987}, true, true, 100*time.Second + 987}, + {&durpb.Duration{-100, -987}, true, true, -(100*time.Second + 987)}, + // The largest duration representable in Go. + {&durpb.Duration{maxGoSeconds, int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, true, math.MaxInt64}, + // The smallest duration representable in Go. + {&durpb.Duration{minGoSeconds, int32(math.MinInt64 - 1e9*minGoSeconds)}, true, true, math.MinInt64}, + {nil, false, false, 0}, + {&durpb.Duration{-100, 987}, false, false, 0}, + {&durpb.Duration{100, -987}, false, false, 0}, + {&durpb.Duration{math.MinInt64, 0}, false, false, 0}, + {&durpb.Duration{math.MaxInt64, 0}, false, false, 0}, + // The largest valid duration. + {&durpb.Duration{maxSeconds, 1e9 - 1}, true, false, 0}, + // The smallest valid duration. + {&durpb.Duration{minSeconds, -(1e9 - 1)}, true, false, 0}, + // The smallest invalid duration above the valid range. + {&durpb.Duration{maxSeconds + 1, 0}, false, false, 0}, + // The largest invalid duration below the valid range. + {&durpb.Duration{minSeconds - 1, -(1e9 - 1)}, false, false, 0}, + // One nanosecond past the largest duration representable in Go. + {&durpb.Duration{maxGoSeconds, int32(math.MaxInt64-1e9*maxGoSeconds) + 1}, true, false, 0}, + // One nanosecond past the smallest duration representable in Go. + {&durpb.Duration{minGoSeconds, int32(math.MinInt64-1e9*minGoSeconds) - 1}, true, false, 0}, + // One second past the largest duration representable in Go. + {&durpb.Duration{maxGoSeconds + 1, int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, false, 0}, + // One second past the smallest duration representable in Go. + {&durpb.Duration{minGoSeconds - 1, int32(math.MinInt64 - 1e9*minGoSeconds)}, true, false, 0}, +} + +func TestValidateDuration(t *testing.T) { + for _, test := range durationTests { + err := validateDuration(test.proto) + gotValid := (err == nil) + if gotValid != test.isValid { + t.Errorf("validateDuration(%v) = %t, want %t", test.proto, gotValid, test.isValid) + } + } +} + +func TestDuration(t *testing.T) { + for _, test := range durationTests { + got, err := Duration(test.proto) + gotOK := (err == nil) + wantOK := test.isValid && test.inRange + if gotOK != wantOK { + t.Errorf("Duration(%v) ok = %t, want %t", test.proto, gotOK, wantOK) + } + if err == nil && got != test.dur { + t.Errorf("Duration(%v) = %v, want %v", test.proto, got, test.dur) + } + } +} + +func TestDurationProto(t *testing.T) { + for _, test := range durationTests { + if test.isValid && test.inRange { + got := DurationProto(test.dur) + if !proto.Equal(got, test.proto) { + t.Errorf("DurationProto(%v) = %v, want %v", test.dur, got, test.proto) + } + } + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/regen.sh b/vendor/github.com/golang/protobuf/ptypes/regen.sh new file mode 100755 index 00000000000..2a5b4e8bdcc --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/regen.sh @@ -0,0 +1,66 @@ +#!/bin/bash -e +# +# This script fetches and rebuilds the "well-known types" protocol buffers. +# To run this you will need protoc and goprotobuf installed; +# see https://github.com/golang/protobuf for instructions. +# You also need Go and Git installed. 
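Stepping away from the regen script for a moment, the Duration and DurationProto helpers added earlier in this patch convert between durpb.Duration and time.Duration; a minimal round-trip sketch (the values are illustrative):

    package main

    import (
        "fmt"
        "log"
        "time"

        "github.com/golang/protobuf/ptypes"
    )

    func main() {
        // time.Duration -> durpb.Duration (always succeeds).
        d := 90*time.Second + 500*time.Millisecond
        p := ptypes.DurationProto(d)
        fmt.Printf("proto: seconds=%d nanos=%d\n", p.Seconds, p.Nanos)

        // durpb.Duration -> time.Duration; this direction can fail, e.g.
        // for values outside the ~290-year range of time.Duration.
        back, err := ptypes.Duration(p)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("round trip ok:", back == d) // true
    }
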
+ +PKG=github.com/golang/protobuf/ptypes +UPSTREAM=https://github.com/google/protobuf +UPSTREAM_SUBDIR=src/google/protobuf +PROTO_FILES=' + any.proto + duration.proto + empty.proto + struct.proto + timestamp.proto + wrappers.proto +' + +function die() { + echo 1>&2 $* + exit 1 +} + +# Sanity check that the right tools are accessible. +for tool in go git protoc protoc-gen-go; do + q=$(which $tool) || die "didn't find $tool" + echo 1>&2 "$tool: $q" +done + +tmpdir=$(mktemp -d -t regen-wkt.XXXXXX) +trap 'rm -rf $tmpdir' EXIT + +echo -n 1>&2 "finding package dir... " +pkgdir=$(go list -f '{{.Dir}}' $PKG) +echo 1>&2 $pkgdir +base=$(echo $pkgdir | sed "s,/$PKG\$,,") +echo 1>&2 "base: $base" +cd $base + +echo 1>&2 "fetching latest protos... " +git clone -q $UPSTREAM $tmpdir +# Pass 1: build mapping from upstream filename to our filename. +declare -A filename_map +for f in $(cd $PKG && find * -name '*.proto'); do + echo -n 1>&2 "looking for latest version of $f... " + up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f) | grep -v /testdata/) + echo 1>&2 $up + if [ $(echo $up | wc -w) != "1" ]; then + die "not exactly one match" + fi + filename_map[$up]=$f +done +# Pass 2: copy files +for up in "${!filename_map[@]}"; do + f=${filename_map[$up]} + shortname=$(basename $f | sed 's,\.proto$,,') + cp $tmpdir/$UPSTREAM_SUBDIR/$up $PKG/$f +done + +# Run protoc once per package. +for dir in $(find $PKG -name '*.proto' | xargs dirname | sort | uniq); do + echo 1>&2 "* $dir" + protoc --go_out=. $dir/*.proto +done +echo 1>&2 "All OK" diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 00000000000..1b365762202 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,125 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package ptypes + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" + + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *tspb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func Timestamp(ts *tspb.Timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*tspb.Timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &tspb.Timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *tspb.Timestamp) string { + t, err := Timestamp(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 00000000000..ffcc51594ac --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,127 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +// DO NOT EDIT! 
+ +/* +Package timestamp is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/timestamp/timestamp.proto + +It has these top-level messages: + Timestamp +*/ +package timestamp + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// now = time.time() +// seconds = int(now) +// nanos = int((now - seconds) * 10**9) +// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 194 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9, + 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3, + 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24, + 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, + 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, + 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x91, 0x91, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x50, 0x27, + 0x3e, 0xb8, 0x91, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0x1c, 0xbd, 0x80, 0x91, 0xf1, + 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0xc3, 0x03, + 0xa0, 0xca, 0xf5, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0xda, 0x92, + 0xd8, 0xc0, 0xe6, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01, + 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto new file mode 100644 index 00000000000..7992a85886f --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -0,0 +1,111 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/timestamp"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// now = time.time() +// seconds = int(now) +// nanos = int((now - seconds) * 10**9) +// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. 
+ int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go b/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go new file mode 100644 index 00000000000..114a7f9fcfc --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go @@ -0,0 +1,138 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +import ( + "math" + "testing" + "time" + + "github.com/golang/protobuf/proto" + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +var tests = []struct { + ts *tspb.Timestamp + valid bool + t time.Time +}{ + // The timestamp representing the Unix epoch date. + {&tspb.Timestamp{0, 0}, true, utcDate(1970, 1, 1)}, + // The smallest representable timestamp. + {&tspb.Timestamp{math.MinInt64, math.MinInt32}, false, + time.Unix(math.MinInt64, math.MinInt32).UTC()}, + // The smallest representable timestamp with non-negative nanos. + {&tspb.Timestamp{math.MinInt64, 0}, false, time.Unix(math.MinInt64, 0).UTC()}, + // The earliest valid timestamp. + {&tspb.Timestamp{minValidSeconds, 0}, true, utcDate(1, 1, 1)}, + //"0001-01-01T00:00:00Z"}, + // The largest representable timestamp. + {&tspb.Timestamp{math.MaxInt64, math.MaxInt32}, false, + time.Unix(math.MaxInt64, math.MaxInt32).UTC()}, + // The largest representable timestamp with nanos in range. + {&tspb.Timestamp{math.MaxInt64, 1e9 - 1}, false, + time.Unix(math.MaxInt64, 1e9-1).UTC()}, + // The largest valid timestamp. + {&tspb.Timestamp{maxValidSeconds - 1, 1e9 - 1}, true, + time.Date(9999, 12, 31, 23, 59, 59, 1e9-1, time.UTC)}, + // The smallest invalid timestamp that is larger than the valid range. + {&tspb.Timestamp{maxValidSeconds, 0}, false, time.Unix(maxValidSeconds, 0).UTC()}, + // A date before the epoch. + {&tspb.Timestamp{-281836800, 0}, true, utcDate(1961, 1, 26)}, + // A date after the epoch. 
+ {&tspb.Timestamp{1296000000, 0}, true, utcDate(2011, 1, 26)}, + // A date after the epoch, in the middle of the day. + {&tspb.Timestamp{1296012345, 940483}, true, + time.Date(2011, 1, 26, 3, 25, 45, 940483, time.UTC)}, +} + +func TestValidateTimestamp(t *testing.T) { + for _, s := range tests { + got := validateTimestamp(s.ts) + if (got == nil) != s.valid { + t.Errorf("validateTimestamp(%v) = %v, want %v", s.ts, got, s.valid) + } + } +} + +func TestTimestamp(t *testing.T) { + for _, s := range tests { + got, err := Timestamp(s.ts) + if (err == nil) != s.valid { + t.Errorf("Timestamp(%v) error = %v, but valid = %t", s.ts, err, s.valid) + } else if s.valid && got != s.t { + t.Errorf("Timestamp(%v) = %v, want %v", s.ts, got, s.t) + } + } + // Special case: a nil Timestamp is an error, but returns the 0 Unix time. + got, err := Timestamp(nil) + want := time.Unix(0, 0).UTC() + if got != want { + t.Errorf("Timestamp(nil) = %v, want %v", got, want) + } + if err == nil { + t.Errorf("Timestamp(nil) error = nil, expected error") + } +} + +func TestTimestampProto(t *testing.T) { + for _, s := range tests { + got, err := TimestampProto(s.t) + if (err == nil) != s.valid { + t.Errorf("TimestampProto(%v) error = %v, but valid = %t", s.t, err, s.valid) + } else if s.valid && !proto.Equal(got, s.ts) { + t.Errorf("TimestampProto(%v) = %v, want %v", s.t, got, s.ts) + } + } + // No corresponding special case here: no time.Time results in a nil Timestamp. +} + +func TestTimestampString(t *testing.T) { + for _, test := range []struct { + ts *tspb.Timestamp + want string + }{ + // Not much testing needed because presumably time.Format is + // well-tested. + {&tspb.Timestamp{0, 0}, "1970-01-01T00:00:00Z"}, + {&tspb.Timestamp{minValidSeconds - 1, 0}, "(timestamp: seconds:-62135596801 before 0001-01-01)"}, + } { + got := TimestampString(test.ts) + if got != test.want { + t.Errorf("TimestampString(%v) = %q, want %q", test.ts, got, test.want) + } + } +} + +func utcDate(year, month, day int) time.Time { + return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC) +} diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml new file mode 100644 index 00000000000..4f2ee4d9733 --- /dev/null +++ b/vendor/github.com/google/btree/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/go-openapi/analysis/LICENSE b/vendor/github.com/google/btree/LICENSE similarity index 100% rename from vendor/github.com/go-openapi/analysis/LICENSE rename to vendor/github.com/google/btree/LICENSE diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md new file mode 100644 index 00000000000..6062a4dacd4 --- /dev/null +++ b/vendor/github.com/google/btree/README.md @@ -0,0 +1,12 @@ +# BTree implementation for Go + +![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) + +This package provides an in-memory B-Tree implementation for Go, useful as +an ordered, mutable data structure. + +The API is based off of the wonderful +http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to +act as a drop-in replacement for gollrb trees. + +See http://godoc.org/github.com/google/btree for documentation. diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go new file mode 100644 index 00000000000..fc5aaaa13aa --- /dev/null +++ b/vendor/github.com/google/btree/btree.go @@ -0,0 +1,649 @@ +// Copyright 2014 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implmentation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values or backwards iteration. +package btree + +import ( + "fmt" + "io" + "sort" + "strings" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + Less(than Item) bool +} + +const ( + DefaultFreeListSize = 32 +) + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are not safe for concurrent write access. +type FreeList struct { + freelist []*node +} + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. 
+func NewFreeList(size int) *FreeList { + return &FreeList{freelist: make([]*node, 0, size)} +} + +func (f *FreeList) newNode() (n *node) { + index := len(f.freelist) - 1 + if index < 0 { + return new(node) + } + f.freelist, n = f.freelist[:index], f.freelist[index] + return +} + +func (f *FreeList) freeNode(n *node) { + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + } +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator func(i Item) bool + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int) *BTree { + return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize)) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list. +func NewWithFreeList(degree int, f *FreeList) *BTree { + if degree <= 1 { + panic("bad degree") + } + return &BTree{ + degree: degree, + freelist: f, + } +} + +// items stores items in a node. +type items []Item + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items) insertAt(index int, item Item) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items) removeAt(index int) Item { + item := (*s)[index] + (*s)[index] = nil + copy((*s)[index:], (*s)[index+1:]) + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items) pop() (out Item) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items) find(item Item) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return item.Less(s[i]) + }) + if i > 0 && !s[i-1].Less(item) { + return i - 1, true + } + return i, false +} + +// children stores child nodes in a node. +type children []*node + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *children) insertAt(index int, n *node) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = n +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *children) removeAt(index int) *node { + n := (*s)[index] + (*s)[index] = nil + copy((*s)[index:], (*s)[index+1:]) + *s = (*s)[:len(*s)-1] + return n +} + +// pop removes and returns the last element in the list. +func (s *children) pop() (out *node) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node struct { + items items + children children + t *BTree +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. 
+func (n *node) split(i int) (Item, *node) { + item := n.items[i] + next := n.t.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items = n.items[:i] + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children = n.children[:i+1] + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.children[i] + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item be +// be found/replaced by insert, it will be returned. +func (n *node) insert(item Item, maxItems int) Item { + i, found := n.items.find(item) + if found { + out := n.items[i] + n.items[i] = item + return out + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return nil + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case item.Less(inTree): + // no change, we want first split node + case inTree.Less(item): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out + } + } + return n.children[i].insert(item, maxItems) +} + +// get finds the given key in the subtree and returns it. +func (n *node) get(key Item) Item { + i, found := n.items.find(key) + if found { + return n.items[i] + } else if len(n.children) > 0 { + return n.children[i].get(key) + } + return nil +} + +// min returns the first item in the subtree. +func min(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return nil + } + return n.items[0] +} + +// max returns the last item in the subtree. +func max(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return nil + } + return n.items[len(n.items)-1] +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node) remove(item Item, minItems int, typ toRemove) Item { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop() + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0) + } + i = 0 + case removeItem: + i, found = n.items.find(item) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i) + } + return nil + } + default: + panic("invalid type") + } + // If we get to here, we have children. + child := n.children[i] + if len(child.items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ) + } + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. 
+ out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + n.items[i] = child.remove(nil, minItems, removeMax) + return out + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. +// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. +func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { + child := n.children[i] + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + stealFrom := n.children[i-1] + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + stealFrom := n.children[i+1] + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + child = n.children[i] + } + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + n.t.freeNode(mergeChild) + } + return n.remove(item, minItems, typ) +} + +// iterate provides a simple method for iterating over elements in the tree. +// It could probably use some work to be extra-efficient (it calls from() a +// little more than it should), but it works pretty well for now. +// +// It requires that 'from' and 'to' both return true for values we should hit +// with the iterator. It should also be the case that 'from' returns true for +// values less than or equal to values 'to' returns true for, and 'to' +// returns true for values greater than or equal to those that 'from' +// does. 
+func (n *node) iterate(from, to func(Item) bool, iter ItemIterator) bool { + for i, item := range n.items { + if !from(item) { + continue + } + if len(n.children) > 0 && !n.children[i].iterate(from, to, iter) { + return false + } + if !to(item) { + return false + } + if !iter(item) { + return false + } + } + if len(n.children) > 0 { + return n.children[len(n.children)-1].iterate(from, to, iter) + } + return true +} + +// Used for testing/debugging purposes. +func (n *node) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree struct { + degree int + length int + root *node + freelist *FreeList +} + +// maxItems returns the max number of items to allow per node. +func (t *BTree) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTree) minItems() int { + return t.degree - 1 +} + +func (t *BTree) newNode() (n *node) { + n = t.freelist.newNode() + n.t = t + return +} + +func (t *BTree) freeNode(n *node) { + for i := range n.items { + n.items[i] = nil // clear to allow GC + } + n.items = n.items[:0] + for i := range n.children { + n.children[i] = nil // clear to allow GC + } + n.children = n.children[:0] + n.t = nil // clear to allow GC + t.freelist.freeNode(n) +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). +func (t *BTree) ReplaceOrInsert(item Item) Item { + if item == nil { + panic("nil item being added to BTree") + } + if t.root == nil { + t.root = t.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return nil + } else if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + out := t.root.insert(item, t.maxItems()) + if out == nil { + t.length++ + } + return out +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + return t.deleteItem(item, removeItem) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + return t.deleteItem(nil, removeMin) +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. 
+func (t *BTree) DeleteMax() Item { + return t.deleteItem(nil, removeMax) +} + +func (t *BTree) deleteItem(item Item, typ toRemove) Item { + if t.root == nil || len(t.root.items) == 0 { + return nil + } + out := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.freeNode(oldroot) + } + if out != nil { + t.length-- + } + return out +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate( + func(a Item) bool { return !a.Less(greaterOrEqual) }, + func(a Item) bool { return a.Less(lessThan) }, + iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate( + func(a Item) bool { return true }, + func(a Item) bool { return a.Less(pivot) }, + iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate( + func(a Item) bool { return !a.Less(pivot) }, + func(a Item) bool { return true }, + iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate( + func(a Item) bool { return true }, + func(a Item) bool { return true }, + iterator) +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + if t.root == nil { + return nil + } + return t.root.get(key) +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + return min(t.root) +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + return max(t.root) +} + +// Has returns true if the given key is in the tree. +func (t *BTree) Has(key Item) bool { + return t.Get(key) != nil +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return t.length +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item) bool { + return a < b.(Int) +} diff --git a/vendor/github.com/google/btree/btree_mem.go b/vendor/github.com/google/btree/btree_mem.go new file mode 100644 index 00000000000..cb95b7fa1bc --- /dev/null +++ b/vendor/github.com/google/btree/btree_mem.go @@ -0,0 +1,76 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// +build ignore + +// This binary compares memory usage between btree and gollrb. +package main + +import ( + "flag" + "fmt" + "math/rand" + "runtime" + "time" + + "github.com/google/btree" + "github.com/petar/GoLLRB/llrb" +) + +var ( + size = flag.Int("size", 1000000, "size of the tree to build") + degree = flag.Int("degree", 8, "degree of btree") + gollrb = flag.Bool("llrb", false, "use llrb instead of btree") +) + +func main() { + flag.Parse() + vals := rand.Perm(*size) + var t, v interface{} + v = vals + var stats runtime.MemStats + for i := 0; i < 10; i++ { + runtime.GC() + } + fmt.Println("-------- BEFORE ----------") + runtime.ReadMemStats(&stats) + fmt.Printf("%+v\n", stats) + start := time.Now() + if *gollrb { + tr := llrb.New() + for _, v := range vals { + tr.ReplaceOrInsert(llrb.Int(v)) + } + t = tr // keep it around + } else { + tr := btree.New(*degree) + for _, v := range vals { + tr.ReplaceOrInsert(btree.Int(v)) + } + t = tr // keep it around + } + fmt.Printf("%v inserts in %v\n", *size, time.Since(start)) + fmt.Println("-------- AFTER ----------") + runtime.ReadMemStats(&stats) + fmt.Printf("%+v\n", stats) + for i := 0; i < 10; i++ { + runtime.GC() + } + fmt.Println("-------- AFTER GC ----------") + runtime.ReadMemStats(&stats) + fmt.Printf("%+v\n", stats) + if t == v { + fmt.Println("to make sure vals and tree aren't GC'd") + } +} diff --git a/vendor/github.com/google/btree/btree_test.go b/vendor/github.com/google/btree/btree_test.go new file mode 100644 index 00000000000..0a2fdde112b --- /dev/null +++ b/vendor/github.com/google/btree/btree_test.go @@ -0,0 +1,309 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package btree + +import ( + "flag" + "fmt" + "math/rand" + "reflect" + "testing" + "time" +) + +func init() { + seed := time.Now().Unix() + fmt.Println(seed) + rand.Seed(seed) +} + +// perm returns a random permutation of n Int items in the range [0, n). +func perm(n int) (out []Item) { + for _, v := range rand.Perm(n) { + out = append(out, Int(v)) + } + return +} + +// rang returns an ordered list of Int items in the range [0, n). +func rang(n int) (out []Item) { + for i := 0; i < n; i++ { + out = append(out, Int(i)) + } + return +} + +// all extracts all items from a tree in order as a slice. 
+func all(t *BTree) (out []Item) { + t.Ascend(func(a Item) bool { + out = append(out, a) + return true + }) + return +} + +var btreeDegree = flag.Int("degree", 32, "B-Tree degree") + +func TestBTree(t *testing.T) { + tr := New(*btreeDegree) + const treeSize = 10000 + for i := 0; i < 10; i++ { + if min := tr.Min(); min != nil { + t.Fatalf("empty min, got %+v", min) + } + if max := tr.Max(); max != nil { + t.Fatalf("empty max, got %+v", max) + } + for _, item := range perm(treeSize) { + if x := tr.ReplaceOrInsert(item); x != nil { + t.Fatal("insert found item", item) + } + } + for _, item := range perm(treeSize) { + if x := tr.ReplaceOrInsert(item); x == nil { + t.Fatal("insert didn't find item", item) + } + } + if min, want := tr.Min(), Item(Int(0)); min != want { + t.Fatalf("min: want %+v, got %+v", want, min) + } + if max, want := tr.Max(), Item(Int(treeSize-1)); max != want { + t.Fatalf("max: want %+v, got %+v", want, max) + } + got := all(tr) + want := rang(treeSize) + if !reflect.DeepEqual(got, want) { + t.Fatalf("mismatch:\n got: %v\nwant: %v", got, want) + } + for _, item := range perm(treeSize) { + if x := tr.Delete(item); x == nil { + t.Fatalf("didn't find %v", item) + } + } + if got = all(tr); len(got) > 0 { + t.Fatalf("some left!: %v", got) + } + } +} + +func ExampleBTree() { + tr := New(*btreeDegree) + for i := Int(0); i < 10; i++ { + tr.ReplaceOrInsert(i) + } + fmt.Println("len: ", tr.Len()) + fmt.Println("get3: ", tr.Get(Int(3))) + fmt.Println("get100: ", tr.Get(Int(100))) + fmt.Println("del4: ", tr.Delete(Int(4))) + fmt.Println("del100: ", tr.Delete(Int(100))) + fmt.Println("replace5: ", tr.ReplaceOrInsert(Int(5))) + fmt.Println("replace100:", tr.ReplaceOrInsert(Int(100))) + fmt.Println("min: ", tr.Min()) + fmt.Println("delmin: ", tr.DeleteMin()) + fmt.Println("max: ", tr.Max()) + fmt.Println("delmax: ", tr.DeleteMax()) + fmt.Println("len: ", tr.Len()) + // Output: + // len: 10 + // get3: 3 + // get100: + // del4: 4 + // del100: + // replace5: 5 + // replace100: + // min: 0 + // delmin: 0 + // max: 100 + // delmax: 100 + // len: 8 +} + +func TestDeleteMin(t *testing.T) { + tr := New(3) + for _, v := range perm(100) { + tr.ReplaceOrInsert(v) + } + var got []Item + for v := tr.DeleteMin(); v != nil; v = tr.DeleteMin() { + got = append(got, v) + } + if want := rang(100); !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } +} + +func TestDeleteMax(t *testing.T) { + tr := New(3) + for _, v := range perm(100) { + tr.ReplaceOrInsert(v) + } + var got []Item + for v := tr.DeleteMax(); v != nil; v = tr.DeleteMax() { + got = append(got, v) + } + // Reverse our list. 
+ for i := 0; i < len(got)/2; i++ { + got[i], got[len(got)-i-1] = got[len(got)-i-1], got[i] + } + if want := rang(100); !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } +} + +func TestAscendRange(t *testing.T) { + tr := New(2) + for _, v := range perm(100) { + tr.ReplaceOrInsert(v) + } + var got []Item + tr.AscendRange(Int(40), Int(60), func(a Item) bool { + got = append(got, a) + return true + }) + if want := rang(100)[40:60]; !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } + got = got[:0] + tr.AscendRange(Int(40), Int(60), func(a Item) bool { + if a.(Int) > 50 { + return false + } + got = append(got, a) + return true + }) + if want := rang(100)[40:51]; !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } +} + +func TestAscendLessThan(t *testing.T) { + tr := New(*btreeDegree) + for _, v := range perm(100) { + tr.ReplaceOrInsert(v) + } + var got []Item + tr.AscendLessThan(Int(60), func(a Item) bool { + got = append(got, a) + return true + }) + if want := rang(100)[:60]; !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } + got = got[:0] + tr.AscendLessThan(Int(60), func(a Item) bool { + if a.(Int) > 50 { + return false + } + got = append(got, a) + return true + }) + if want := rang(100)[:51]; !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } +} + +func TestAscendGreaterOrEqual(t *testing.T) { + tr := New(*btreeDegree) + for _, v := range perm(100) { + tr.ReplaceOrInsert(v) + } + var got []Item + tr.AscendGreaterOrEqual(Int(40), func(a Item) bool { + got = append(got, a) + return true + }) + if want := rang(100)[40:]; !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } + got = got[:0] + tr.AscendGreaterOrEqual(Int(40), func(a Item) bool { + if a.(Int) > 50 { + return false + } + got = append(got, a) + return true + }) + if want := rang(100)[40:51]; !reflect.DeepEqual(got, want) { + t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) + } +} + +const benchmarkTreeSize = 10000 + +func BenchmarkInsert(b *testing.B) { + b.StopTimer() + insertP := perm(benchmarkTreeSize) + b.StartTimer() + i := 0 + for i < b.N { + tr := New(*btreeDegree) + for _, item := range insertP { + tr.ReplaceOrInsert(item) + i++ + if i >= b.N { + return + } + } + } +} + +func BenchmarkDelete(b *testing.B) { + b.StopTimer() + insertP := perm(benchmarkTreeSize) + removeP := perm(benchmarkTreeSize) + b.StartTimer() + i := 0 + for i < b.N { + b.StopTimer() + tr := New(*btreeDegree) + for _, v := range insertP { + tr.ReplaceOrInsert(v) + } + b.StartTimer() + for _, item := range removeP { + tr.Delete(item) + i++ + if i >= b.N { + return + } + } + if tr.Len() > 0 { + panic(tr.Len()) + } + } +} + +func BenchmarkGet(b *testing.B) { + b.StopTimer() + insertP := perm(benchmarkTreeSize) + removeP := perm(benchmarkTreeSize) + b.StartTimer() + i := 0 + for i < b.N { + b.StopTimer() + tr := New(*btreeDegree) + for _, v := range insertP { + tr.ReplaceOrInsert(v) + } + b.StartTimer() + for _, item := range removeP { + tr.Get(item) + i++ + if i >= b.N { + return + } + } + } +} diff --git a/vendor/github.com/googleapis/gnostic/.gitignore b/vendor/github.com/googleapis/gnostic/.gitignore new file mode 100644 index 00000000000..9db23e56abf --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/.gitignore @@ -0,0 +1,13 @@ +# Eclipse +.checkstyle +.project +.settings +# Swift +.build 
+Packages +# vi +*.swp +# vscode +.vscode +.DS_Store +*~ diff --git a/vendor/github.com/googleapis/gnostic/.travis-install.sh b/vendor/github.com/googleapis/gnostic/.travis-install.sh new file mode 100755 index 00000000000..6ae5332c513 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/.travis-install.sh @@ -0,0 +1,36 @@ +#!/bin/sh + +# +# Install dependencies that aren't available as Ubuntu packages. +# +# Everything goes into $HOME/local. +# +# Scripts should add +# - $HOME/local/bin to PATH +# - $HOME/local/lib to LD_LIBRARY_PATH +# + +cd +mkdir -p local + +# Install swift +SWIFT_BRANCH=swift-3.0.2-release +SWIFT_VERSION=swift-3.0.2-RELEASE +SWIFT_PLATFORM=ubuntu14.04 +SWIFT_URL=https://swift.org/builds/$SWIFT_BRANCH/$(echo "$SWIFT_PLATFORM" | tr -d .)/$SWIFT_VERSION/$SWIFT_VERSION-$SWIFT_PLATFORM.tar.gz + +echo $SWIFT_URL + +curl -fSsL $SWIFT_URL -o swift.tar.gz +tar -xzf swift.tar.gz --strip-components=2 --directory=local + +# Install protoc +PROTOC_URL=https://github.com/google/protobuf/releases/download/v3.2.0rc2/protoc-3.2.0rc2-linux-x86_64.zip + +echo $PROTOC_URL + +curl -fSsL $PROTOC_URL -o protoc.zip +unzip protoc.zip -d local + +# Verify installation +find local diff --git a/vendor/github.com/googleapis/gnostic/.travis.yml b/vendor/github.com/googleapis/gnostic/.travis.yml new file mode 100644 index 00000000000..35c7bfe3473 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/.travis.yml @@ -0,0 +1,43 @@ +# Travis CI build file for OpenAPI Compiler, including Go and Swift plugins + +# Use Ubuntu 14.04 +dist: trusty + +sudo: false + +language: go + +addons: + apt: + packages: + - clang-3.8 + - lldb-3.8 + - libicu-dev + - libtool + - libcurl4-openssl-dev + - libbsd-dev + - build-essential + - libssl-dev + - uuid-dev + - curl + - unzip + +install: + - ./.travis-install.sh + - export PATH=.:$HOME/local/bin:$PATH + - make + +script: + - go test . -v + - pushd plugins/gnostic-go-generator/examples/v2.0/bookstore + - make test + - popd + - export PATH=.:$HOME/local/bin:$PATH + - export LD_LIBRARY_PATH=$HOME/local/lib + - pushd plugins/gnostic-swift-generator + - make + - cd examples/bookstore + - make + - .build/debug/Server & + - make test + diff --git a/vendor/github.com/googleapis/gnostic/COMPILE-PROTOS.sh b/vendor/github.com/googleapis/gnostic/COMPILE-PROTOS.sh new file mode 100755 index 00000000000..e771c6e17b0 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/COMPILE-PROTOS.sh @@ -0,0 +1,31 @@ +#!/bin/sh +# +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +go get github.com/golang/protobuf/protoc-gen-go + +protoc \ +--go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. \ +OpenAPIv2/OpenAPIv2.proto + +protoc \ +--go_out=:. \ +plugins/plugin.proto + +protoc \ +--go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. 
\ +OpenAPIv3/OpenAPIv3.proto + diff --git a/vendor/github.com/googleapis/gnostic/CONTRIBUTING.md b/vendor/github.com/googleapis/gnostic/CONTRIBUTING.md new file mode 100644 index 00000000000..6736efd943c --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/CONTRIBUTING.md @@ -0,0 +1,35 @@ +# How to become a contributor and submit your own code + +## Contributor License Agreements + +We'd love to accept your sample apps and patches! Before we can take them, we +have to jump a couple of legal hurdles. + +Please fill out either the individual or corporate Contributor License Agreement +(CLA). + + * If you are an individual writing original source code and you're sure you + own the intellectual property, then you'll need to sign an [individual CLA] + (https://developers.google.com/open-source/cla/individual). + * If you work for a company that wants to allow you to contribute your work, + then you'll need to sign a [corporate CLA] + (https://developers.google.com/open-source/cla/corporate). + +Follow either of the two links above to access the appropriate CLA and +instructions for how to sign and return it. Once we receive it, we'll be able to +accept your pull requests. + +## Contributing A Patch + +1. Submit an issue describing your proposed change to the repo in question. +1. The repo owner will respond to your issue promptly. +1. If your proposed change is accepted, and you haven't already done so, sign a + Contributor License Agreement (see details above). +1. Fork the desired repo, develop and test your code changes. +1. Ensure that your code adheres to the existing style in the sample to which + you are contributing. Refer to the + [Google Cloud Platform Samples Style Guide] + (https://github.com/GoogleCloudPlatform/Template/wiki/style.html) for the + recommended coding standards for this organization. +1. Ensure that your code has an appropriate set of unit tests which all pass. +1. Submit a pull request. diff --git a/vendor/github.com/googleapis/gnostic/LICENSE b/vendor/github.com/googleapis/gnostic/LICENSE new file mode 100644 index 00000000000..6b0b1270ff0 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/LICENSE @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
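[Editor's note, not part of the patch] For orientation while reviewing the large generated OpenAPIv2.go file added below: its exported constructors (NewDocument, NewApiKeySecurity, and so on) all follow the same pattern of unpacking an ordered YAML map via compiler.UnpackMap, validating required/allowed keys, and collecting errors. A minimal, hedged sketch of how such a constructor is typically driven is shown here; the input file name "swagger.yaml" and the choice of decoding straight into a yaml.MapSlice are illustrative assumptions, not something this patch prescribes.

    // sketch only: drive the vendored openapi_v2.NewDocument constructor
    package main

    import (
        "fmt"
        "io/ioutil"
        "log"

        openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
        "github.com/googleapis/gnostic/compiler"
        yaml "gopkg.in/yaml.v2"
    )

    func main() {
        // Read a Swagger 2.0 description (path is a placeholder).
        data, err := ioutil.ReadFile("swagger.yaml")
        if err != nil {
            log.Fatal(err)
        }

        // compiler.UnpackMap expects an ordered map, so decode into yaml.MapSlice.
        var info yaml.MapSlice
        if err := yaml.Unmarshal(data, &info); err != nil {
            log.Fatal(err)
        }

        // Build the protobuf-backed Document; validation errors are aggregated
        // into the returned error by compiler.NewErrorGroupOrNil.
        doc, err := openapi_v2.NewDocument(info, compiler.NewContext("$root", nil))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("parsed swagger version:", doc.Swagger)
    }

The same shape applies to every constructor in the generated file: a nil result plus a grouped error when required keys are missing or values have the wrong type, and a populated struct (including any "x-" vendor extensions) otherwise.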
+ diff --git a/vendor/github.com/googleapis/gnostic/Makefile b/vendor/github.com/googleapis/gnostic/Makefile new file mode 100644 index 00000000000..13726ee4cd1 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/Makefile @@ -0,0 +1,15 @@ + +build: + go get + go install + cd generate-gnostic; go get; go install + cd apps/report; go get; go install + cd apps/petstore-builder; go get; go install + cd plugins/gnostic-go-sample; go get; go install + cd plugins/gnostic-go-generator/encode-templates; go get; go install + cd plugins/gnostic-go-generator; go get; go install + rm -f $(GOPATH)/bin/gnostic-go-client $(GOPATH)/bin/gnostic-go-server + ln -s $(GOPATH)/bin/gnostic-go-generator $(GOPATH)/bin/gnostic-go-client + ln -s $(GOPATH)/bin/gnostic-go-generator $(GOPATH)/bin/gnostic-go-server + cd extensions/sample; make + diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go new file mode 100644 index 00000000000..0e32451a320 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go @@ -0,0 +1,8728 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +package openapi_v2 + +import ( + "fmt" + "github.com/googleapis/gnostic/compiler" + "gopkg.in/yaml.v2" + "regexp" + "strings" +) + +// Version returns the package name (and OpenAPI version). +func Version() string { + return "openapi_v2" +} + +// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. +func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*AdditionalPropertiesItem, error) { + errors := make([]error, 0) + x := &AdditionalPropertiesItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) + if matchingError == nil { + x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // bool boolean = 2; + boolValue, ok := in.(bool) + if ok { + x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewAny creates an object of type Any if possible, returning an error if not. +func NewAny(in interface{}, context *compiler.Context) (*Any, error) { + errors := make([]error, 0) + x := &Any{} + bytes, _ := yaml.Marshal(in) + x.Yaml = string(bytes) + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. 
+func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecurity, error) { + errors := make([]error, 0) + x := &ApiKeySecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [apiKey] + if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header query] + if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 5; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// 
NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not. +func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (*BasicAuthenticationSecurity, error) { + errors := make([]error, 0) + x := &BasicAuthenticationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [basic] + if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. 
+func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter, error) { + errors := make([]error, 0) + x := &BodyParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "schema"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "required", "schema"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [body] + if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool required = 4; + v4 := compiler.MapValueForKey(m, "required") + if v4 != nil { + x.Required, ok = v4.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema schema = 5; + v5 := compiler.MapValueForKey(m, "schema") + if v5 != nil { + var err error + x.Schema, err = NewSchema(v5, compiler.NewContext("schema", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewContact creates an object of type Contact if 
possible, returning an error if not. +func NewContact(in interface{}, context *compiler.Context) (*Contact, error) { + errors := make([]error, 0) + x := &Contact{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"email", "name", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string email = 3; + v3 := compiler.MapValueForKey(m, "email") + if v3 != nil { + x.Email, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for email: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 4; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefault creates an object of type Default if possible, returning an error if not. 
+func NewDefault(in interface{}, context *compiler.Context) (*Default, error) { + errors := make([]error, 0) + x := &Default{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefinitions creates an object of type Definitions if possible, returning an error if not. +func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, error) { + errors := make([]error, 0) + x := &Definitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDocument creates an object of type Document if possible, returning an error if not. 
+func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { + errors := make([]error, 0) + x := &Document{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"info", "paths", "swagger"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string swagger = 1; + v1 := compiler.MapValueForKey(m, "swagger") + if v1 != nil { + x.Swagger, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [2.0] + if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { + message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Info info = 2; + v2 := compiler.MapValueForKey(m, "info") + if v2 != nil { + var err error + x.Info, err = NewInfo(v2, compiler.NewContext("info", context)) + if err != nil { + errors = append(errors, err) + } + } + // string host = 3; + v3 := compiler.MapValueForKey(m, "host") + if v3 != nil { + x.Host, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for host: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string base_path = 4; + v4 := compiler.MapValueForKey(m, "basePath") + if v4 != nil { + x.BasePath, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for basePath: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string schemes = 5; + v5 := compiler.MapValueForKey(m, "schemes") + if v5 != nil { + v, ok := v5.([]interface{}) + if ok { + x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %+v", v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 6; + v6 := compiler.MapValueForKey(m, "consumes") + if v6 != nil { + v, ok := v6.([]interface{}) + if ok { + x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // 
repeated string produces = 7; + v7 := compiler.MapValueForKey(m, "produces") + if v7 != nil { + v, ok := v7.([]interface{}) + if ok { + x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Paths paths = 8; + v8 := compiler.MapValueForKey(m, "paths") + if v8 != nil { + var err error + x.Paths, err = NewPaths(v8, compiler.NewContext("paths", context)) + if err != nil { + errors = append(errors, err) + } + } + // Definitions definitions = 9; + v9 := compiler.MapValueForKey(m, "definitions") + if v9 != nil { + var err error + x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", context)) + if err != nil { + errors = append(errors, err) + } + } + // ParameterDefinitions parameters = 10; + v10 := compiler.MapValueForKey(m, "parameters") + if v10 != nil { + var err error + x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + } + // ResponseDefinitions responses = 11; + v11 := compiler.MapValueForKey(m, "responses") + if v11 != nil { + var err error + x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := v12.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // SecurityDefinitions security_definitions = 13; + v13 := compiler.MapValueForKey(m, "securityDefinitions") + if v13 != nil { + var err error + x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Tag tags = 14; + v14 := compiler.MapValueForKey(m, "tags") + if v14 != nil { + // repeated Tag + x.Tags = make([]*Tag, 0) + a, ok := v14.([]interface{}) + if ok { + for _, item := range a { + y, err := NewTag(item, compiler.NewContext("tags", context)) + if err != nil { + errors = append(errors, err) + } + x.Tags = append(x.Tags, y) + } + } + } + // ExternalDocs external_docs = 15; + v15 := compiler.MapValueForKey(m, "externalDocs") + if v15 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 16; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + 
return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExamples creates an object of type Examples if possible, returning an error if not. +func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) { + errors := make([]error, 0) + x := &Examples{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. +func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, error) { + errors := make([]error, 0) + x := &ExternalDocs{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"url"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, 
compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewFileSchema creates an object of type FileSchema if possible, returning an error if not. +func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, error) { + errors := make([]error, 0) + x := &FileSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string format = 1; + v1 := compiler.MapValueForKey(m, "format") + if v1 != nil { + x.Format, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 2; + v2 := compiler.MapValueForKey(m, "title") + if v2 != nil { + x.Title, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 4; + v4 := compiler.MapValueForKey(m, "default") + if v4 != nil { + var err error + x.Default, err = NewAny(v4, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string required = 5; + v5 := compiler.MapValueForKey(m, "required") + if v5 != nil { + v, ok := v5.([]interface{}) + if ok { + x.Required = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [file] + if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool read_only = 7; + v7 := compiler.MapValueForKey(m, "readOnly") + if v7 != nil { + x.ReadOnly, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has 
unexpected value for readOnly: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 8; + v8 := compiler.MapValueForKey(m, "externalDocs") + if v8 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 9; + v9 := compiler.MapValueForKey(m, "example") + if v9 != nil { + var err error + x.Example, err = NewAny(v9, compiler.NewContext("example", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 10; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not. +func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*FormDataParameterSubSchema, error) { + errors := make([]error, 0) + x := &FormDataParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [formData] + if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has 
unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 5; + v5 := compiler.MapValueForKey(m, "allowEmptyValue") + if v5 != nil { + x.AllowEmptyValue, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array file] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 7; + v7 := compiler.MapValueForKey(m, "format") + if v7 != nil { + x.Format, ok = v7.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 8; + v8 := compiler.MapValueForKey(m, "items") + if v8 != nil { + var err error + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 9; + v9 := compiler.MapValueForKey(m, "collectionFormat") + if v9 != nil { + x.CollectionFormat, ok = v9.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes multi] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 10; + v10 := compiler.MapValueForKey(m, "default") + if v10 != nil { + var err error + x.Default, err = NewAny(v10, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + switch v11 := v11.(type) { + case float64: + x.Maximum = v11 + case float32: + x.Maximum = float64(v11) + case uint64: + x.Maximum = float64(v11) + case uint32: + x.Maximum = float64(v11) + case int64: + x.Maximum = float64(v11) + case int32: + x.Maximum = float64(v11) + case int: + x.Maximum = float64(v11) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = v12.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) + errors = 
append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + switch v13 := v13.(type) { + case float64: + x.Minimum = v13 + case float32: + x.Minimum = float64(v13) + case uint64: + x.Minimum = float64(v13) + case uint32: + x.Minimum = float64(v13) + case int64: + x.Minimum = float64(v13) + case int32: + x.Minimum = float64(v13) + case int: + x.Minimum = float64(v13) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = v14.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := v16.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 17; + v17 := compiler.MapValueForKey(m, "pattern") + if v17 != nil { + x.Pattern, ok = v17.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := v19.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = v20.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 21; + v21 := compiler.MapValueForKey(m, "enum") + if v21 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v21.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 22; + v22 := compiler.MapValueForKey(m, "multipleOf") + if v22 != nil { + switch v22 := v22.(type) { + case float64: + x.MultipleOf = v22 + case float32: + x.MultipleOf = float64(v22) + case uint64: + x.MultipleOf = float64(v22) + case uint32: + x.MultipleOf = float64(v22) + case int64: + x.MultipleOf = float64(v22) + case int32: + x.MultipleOf = float64(v22) + case int: + 
x.MultipleOf = float64(v22) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 23; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeader creates an object of type Header if possible, returning an error if not. +func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { + errors := make([]error, 0) + x := &Header{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 
!= nil { + x.CollectionFormat, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.Maximum = v6 + case float32: + x.Maximum = float64(v6) + case uint64: + x.Maximum = float64(v6) + case uint32: + x.Maximum = float64(v6) + case int64: + x.Maximum = float64(v6) + case int32: + x.Maximum = float64(v6) + case int: + x.Maximum = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v7 != nil { + x.ExclusiveMaximum, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") + if v8 != nil { + switch v8 := v8.(type) { + case float64: + x.Minimum = v8 + case float32: + x.Minimum = float64(v8) + case uint64: + x.Minimum = float64(v8) + case uint32: + x.Minimum = float64(v8) + case int64: + x.Minimum = float64(v8) + case int32: + x.Minimum = float64(v8) + case int: + x.Minimum = float64(v8) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = v9.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := v10.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = v12.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := v13.(int) + if ok { + x.MaxItems = 
int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = v15.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v16.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + switch v17 := v17.(type) { + case float64: + x.MultipleOf = v17 + case float32: + x.MultipleOf = float64(v17) + case uint64: + x.MultipleOf = float64(v17) + case uint32: + x.MultipleOf = float64(v17) + case int64: + x.MultipleOf = float64(v17) + case int32: + x.MultipleOf = float64(v17) + case int: + x.MultipleOf = float64(v17) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 18; + v18 := compiler.MapValueForKey(m, "description") + if v18 != nil { + x.Description, ok = v18.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 19; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not. 
+func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*HeaderParameterSubSchema, error) { + errors := make([]error, 0) + x := &HeaderParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header] + if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, 
"collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = v8.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + switch v10 := v10.(type) { + case float64: + x.Maximum = v10 + case float32: + x.Maximum = float64(v10) + case uint64: + x.Maximum = float64(v10) + case uint32: + x.Maximum = float64(v10) + case int64: + x.Maximum = float64(v10) + case int32: + x.Maximum = float64(v10) + case int: + x.Maximum = float64(v10) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + switch v12 := v12.(type) { + case float64: + x.Minimum = v12 + case float32: + x.Minimum = float64(v12) + case uint64: + x.Minimum = float64(v12) + case uint32: + x.Minimum = float64(v12) + case int64: + x.Minimum = float64(v12) + case int32: + x.Minimum = float64(v12) + case int: + x.Minimum = float64(v12) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = v13.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = v16.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + 
if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = v19.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + switch v21 := v21.(type) { + case float64: + x.MultipleOf = v21 + case float32: + x.MultipleOf = float64(v21) + case uint64: + x.MultipleOf = float64(v21) + case uint32: + x.MultipleOf = float64(v21) + case int64: + x.MultipleOf = float64(v21) + case int32: + x.MultipleOf = float64(v21) + case int: + x.MultipleOf = float64(v21) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaders creates an object of type Headers if possible, returning an error if not. 
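The parameter sub-schema constructors above all follow the same shape: compiler.UnpackMap turns the raw node into an ordered map, unknown keys are rejected against allowedKeys/allowedPatterns, and each recognised key is coerced onto the generated struct while errors accumulate. A minimal usage sketch, not part of the vendored file: it assumes imports of "fmt", "log", gopkg.in/yaml.v2 and the vendored gnostic compiler package, that the snippet lives in the same package as these constructors, and that a nil parent is acceptable when creating a root compiler.Context.

    // Hypothetical illustration: parse a single header parameter description.
    func exampleHeaderParameter() {
        var node yaml.MapSlice // yaml.v2 decodes mappings into MapSlice, which compiler.UnpackMap accepts
        if err := yaml.Unmarshal([]byte("in: header\nname: X-Trace-Id\ntype: string\n"), &node); err != nil {
            log.Fatal(err)
        }
        // nil parent assumed acceptable for a root context
        p, err := NewHeaderParameterSubSchema(node, compiler.NewContext("parameter", nil))
        if err != nil {
            log.Fatal(err) // aggregated validation errors for unknown or mistyped keys
        }
        fmt.Println(p.Name, p.Type)
    }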
+func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) { + errors := make([]error, 0) + x := &Headers{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedHeader additional_properties = 1; + // MAP: Header + x.AdditionalProperties = make([]*NamedHeader, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedHeader{} + pair.Name = k + var err error + pair.Value, err = NewHeader(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewInfo creates an object of type Info if possible, returning an error if not. +func NewInfo(in interface{}, context *compiler.Context) (*Info, error) { + errors := make([]error, 0) + x := &Info{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"title", "version"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"contact", "description", "license", "termsOfService", "title", "version"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string title = 1; + v1 := compiler.MapValueForKey(m, "title") + if v1 != nil { + x.Title, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string version = 2; + v2 := compiler.MapValueForKey(m, "version") + if v2 != nil { + x.Version, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for version: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string terms_of_service = 4; + v4 := compiler.MapValueForKey(m, "termsOfService") + if v4 != nil { + x.TermsOfService, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for termsOfService: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Contact contact = 5; + v5 := compiler.MapValueForKey(m, "contact") + if v5 != nil { + var err error + x.Contact, err = NewContact(v5, compiler.NewContext("contact", context)) + if err != nil { + errors = append(errors, err) + } + } + // License license = 6; + v6 := compiler.MapValueForKey(m, "license") + if v6 != nil { + var err error + x.License, err = NewLicense(v6, 
compiler.NewContext("license", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not. +func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error) { + errors := make([]error, 0) + x := &ItemsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + x.Schema = make([]*Schema, 0) + y, err := NewSchema(m, compiler.NewContext("", context)) + if err != nil { + return nil, err + } + x.Schema = append(x.Schema, y) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewJsonReference creates an object of type JsonReference if possible, returning an error if not. +func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference, error) { + errors := make([]error, 0) + x := &JsonReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"$ref"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"$ref", "description"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewLicense creates an object of type License if possible, returning an error if not. 
+func NewLicense(in interface{}, context *compiler.Context) (*License, error) { + errors := make([]error, 0) + x := &License{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"name", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedAny creates an object of type NamedAny if possible, returning an error if not. 
+func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) { + errors := make([]error, 0) + x := &NamedAny{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewAny(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not. +func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, error) { + errors := make([]error, 0) + x := &NamedHeader{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Header value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewHeader(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not. 
+func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParameter, error) { + errors := make([]error, 0) + x := &NamedParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Parameter value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewParameter(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not. +func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem, error) { + errors := make([]error, 0) + x := &NamedPathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PathItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewPathItem(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not. 
+func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse, error) { + errors := make([]error, 0) + x := &NamedResponse{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Response value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponse(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not. +func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedResponseValue, error) { + errors := make([]error, 0) + x := &NamedResponseValue{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ResponseValue value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponseValue(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSchema creates an object of type NamedSchema if possible, returning an error if not. 
+func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, error) { + errors := make([]error, 0) + x := &NamedSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSchema(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not. +func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) { + errors := make([]error, 0) + x := &NamedSecurityDefinitionsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SecurityDefinitionsItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedString creates an object of type NamedString if possible, returning an error if not. 
+func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, error) { + errors := make([]error, 0) + x := &NamedString{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + x.Value, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for value: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not. +func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStringArray, error) { + errors := make([]error, 0) + x := &NamedStringArray{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // StringArray value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewStringArray(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not. 
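The Named* wrappers above (NamedAny through NamedStringArray) are how the generated model represents map-valued schema members: each entry is kept as an ordered name/value pair instead of a Go map. Callers that want ordinary lookups can flatten them; a sketch for Headers, using only the fields set above (AdditionalProperties, Name, Value), with a helper name made up for illustration:

    // headersAsMap flattens the ordered NamedHeader pairs into a lookup map.
    func headersAsMap(h *Headers) map[string]*Header {
        m := make(map[string]*Header, len(h.AdditionalProperties))
        for _, pair := range h.AdditionalProperties {
            m[pair.Name] = pair.Value
        }
        return m
    }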
+func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyParameter, error) { + errors := make([]error, 0) + x := &NonBodyParameter{} + matched := false + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // HeaderParameterSubSchema header_parameter_sub_schema = 1; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // QueryParameterSubSchema query_parameter_sub_schema = 3; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // PathParameterSubSchema path_parameter_sub_schema = 4; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not. 
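NewNonBodyParameter above resolves a oneof: it tries each sub-schema constructor against the same map, keeps the first one that parses cleanly in x.Oneof, and discards the speculative matching errors once something matched. Consumers recover the concrete variant with a type switch; a sketch assuming each sub-schema exposes an In field populated from the required "in" key, as the header variant shown earlier does (helper name is illustrative only):

    // parameterLocation returns the "in" value of whichever sub-schema matched.
    func parameterLocation(p *NonBodyParameter) string {
        switch t := p.Oneof.(type) {
        case *NonBodyParameter_HeaderParameterSubSchema:
            return t.HeaderParameterSubSchema.In
        case *NonBodyParameter_FormDataParameterSubSchema:
            return t.FormDataParameterSubSchema.In
        case *NonBodyParameter_QueryParameterSubSchema:
            return t.QueryParameterSubSchema.In
        case *NonBodyParameter_PathParameterSubSchema:
            return t.PathParameterSubSchema.In
        default:
            return ""
        }
    }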
+func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) { + errors := make([]error, 0) + x := &Oauth2AccessCodeSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [accessCode] + if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string token_url = 5; + v5 := compiler.MapValueForKey(m, "tokenUrl") + if v5 != nil { + x.TokenUrl, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 6; + v6 := compiler.MapValueForKey(m, "description") + if v6 != nil { + x.Description, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := 
compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not. +func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*Oauth2ApplicationSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ApplicationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [application] + if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string 
description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not. +func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oauth2ImplicitSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ImplicitSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [implicit] + if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = 
NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not. +func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oauth2PasswordSecurity, error) { + errors := make([]error, 0) + x := &Oauth2PasswordSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, 
compiler.NewError(context, message)) + } + // check for valid enum values + // [password] + if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not. +func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes, error) { + errors := make([]error, 0) + x := &Oauth2Scopes{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedString additional_properties = 1; + // MAP: string + x.AdditionalProperties = make([]*NamedString, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedString{} + pair.Name = k + pair.Value = v.(string) + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOperation creates an object of type Operation if possible, returning an error if not. 
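NewOauth2Scopes above stores each scope as a NamedString pair; note that it asserts v.(string) without an ok-check, so a non-string scope description would panic rather than surface as a validation error. Flattening the pairs back into a native map is straightforward; a sketch using only the fields set above, with an illustrative helper name:

    // scopesAsMap flattens the ordered scope pairs into a map of scope name to description.
    func scopesAsMap(s *Oauth2Scopes) map[string]string {
        m := make(map[string]string, len(s.AdditionalProperties))
        for _, pair := range s.AdditionalProperties {
            m[pair.Name] = pair.Value
        }
        return m
    }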
+func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) { + errors := make([]error, 0) + x := &Operation{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"responses"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated string tags = 1; + v1 := compiler.MapValueForKey(m, "tags") + if v1 != nil { + v, ok := v1.([]interface{}) + if ok { + x.Tags = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for tags: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string summary = 2; + v2 := compiler.MapValueForKey(m, "summary") + if v2 != nil { + x.Summary, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 4; + v4 := compiler.MapValueForKey(m, "externalDocs") + if v4 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // string operation_id = 5; + v5 := compiler.MapValueForKey(m, "operationId") + if v5 != nil { + x.OperationId, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for operationId: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string produces = 6; + v6 := compiler.MapValueForKey(m, "produces") + if v6 != nil { + v, ok := v6.([]interface{}) + if ok { + x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 7; + v7 := compiler.MapValueForKey(m, "consumes") + if v7 != nil { + v, ok := v7.([]interface{}) + if ok { + x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated ParametersItem parameters = 8; + v8 := compiler.MapValueForKey(m, "parameters") + if v8 != nil { + // repeated ParametersItem + x.Parameters = 
make([]*ParametersItem, 0) + a, ok := v8.([]interface{}) + if ok { + for _, item := range a { + y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // Responses responses = 9; + v9 := compiler.MapValueForKey(m, "responses") + if v9 != nil { + var err error + x.Responses, err = NewResponses(v9, compiler.NewContext("responses", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string schemes = 10; + v10 := compiler.MapValueForKey(m, "schemes") + if v10 != nil { + v, ok := v10.([]interface{}) + if ok { + x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %+v", v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool deprecated = 11; + v11 := compiler.MapValueForKey(m, "deprecated") + if v11 != nil { + x.Deprecated, ok = v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for deprecated: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := v12.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // repeated NamedAny vendor_extension = 13; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameter creates an object of type Parameter if possible, returning an error if not. 
+func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error) { + errors := make([]error, 0) + x := &Parameter{} + matched := false + // BodyParameter body_parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", context)) + if matchingError == nil { + x.Oneof = &Parameter_BodyParameter{BodyParameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // NonBodyParameter non_body_parameter = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", context)) + if matchingError == nil { + x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not. +func NewParameterDefinitions(in interface{}, context *compiler.Context) (*ParameterDefinitions, error) { + errors := make([]error, 0) + x := &ParameterDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedParameter additional_properties = 1; + // MAP: Parameter + x.AdditionalProperties = make([]*NamedParameter, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedParameter{} + pair.Name = k + var err error + pair.Value, err = NewParameter(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not. 
+func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersItem, error) { + errors := make([]error, 0) + x := &ParametersItem{} + matched := false + // Parameter parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewParameter(m, compiler.NewContext("parameter", context)) + if matchingError == nil { + x.Oneof = &ParametersItem_Parameter{Parameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // JsonReference json_reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) + if matchingError == nil { + x.Oneof = &ParametersItem_JsonReference{JsonReference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPathItem creates an object of type PathItem if possible, returning an error if not. +func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) { + errors := make([]error, 0) + x := &PathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"$ref", "delete", "get", "head", "options", "parameters", "patch", "post", "put"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Operation get = 2; + v2 := compiler.MapValueForKey(m, "get") + if v2 != nil { + var err error + x.Get, err = NewOperation(v2, compiler.NewContext("get", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation put = 3; + v3 := compiler.MapValueForKey(m, "put") + if v3 != nil { + var err error + x.Put, err = NewOperation(v3, compiler.NewContext("put", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation post = 4; + v4 := compiler.MapValueForKey(m, "post") + if v4 != nil { + var err error + x.Post, err = NewOperation(v4, compiler.NewContext("post", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation delete = 5; + v5 := compiler.MapValueForKey(m, "delete") + if v5 != nil { + var err error + x.Delete, err = NewOperation(v5, compiler.NewContext("delete", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation options = 6; + v6 := compiler.MapValueForKey(m, "options") + if v6 != nil { + var err error + x.Options, err = NewOperation(v6, compiler.NewContext("options", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation head = 7; + v7 := compiler.MapValueForKey(m, "head") + if v7 != nil { + var err error 
+ x.Head, err = NewOperation(v7, compiler.NewContext("head", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation patch = 8; + v8 := compiler.MapValueForKey(m, "patch") + if v8 != nil { + var err error + x.Patch, err = NewOperation(v8, compiler.NewContext("patch", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated ParametersItem parameters = 9; + v9 := compiler.MapValueForKey(m, "parameters") + if v9 != nil { + // repeated ParametersItem + x.Parameters = make([]*ParametersItem, 0) + a, ok := v9.([]interface{}) + if ok { + for _, item := range a { + y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // repeated NamedAny vendor_extension = 10; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not. +func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*PathParameterSubSchema, error) { + errors := make([]error, 0) + x := &PathParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"required"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, 
compiler.NewError(context, message)) + } + // check for valid enum values + // [path] + if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, "collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = v8.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + switch v10 := v10.(type) { + case float64: + x.Maximum = v10 + case float32: + x.Maximum = float64(v10) + case uint64: + x.Maximum = float64(v10) + case uint32: + x.Maximum = float64(v10) + case int64: + x.Maximum = float64(v10) + case int32: + x.Maximum = float64(v10) + case int: + x.Maximum = float64(v10) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = 
v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + switch v12 := v12.(type) { + case float64: + x.Minimum = v12 + case float32: + x.Minimum = float64(v12) + case uint64: + x.Minimum = float64(v12) + case uint32: + x.Minimum = float64(v12) + case int64: + x.Minimum = float64(v12) + case int32: + x.Minimum = float64(v12) + case int: + x.Minimum = float64(v12) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = v13.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = v16.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = v19.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + switch v21 := v21.(type) { + case float64: + x.MultipleOf = v21 + case float32: + x.MultipleOf = float64(v21) + case uint64: + x.MultipleOf = float64(v21) + case uint32: + x.MultipleOf = 
float64(v21) + case int64: + x.MultipleOf = float64(v21) + case int32: + x.MultipleOf = float64(v21) + case int: + x.MultipleOf = float64(v21) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPaths creates an object of type Paths if possible, returning an error if not. +func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) { + errors := make([]error, 0) + x := &Paths{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []*regexp.Regexp{pattern0, pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedAny vendor_extension = 1; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + // repeated NamedPathItem path = 2; + // MAP: PathItem ^/ + x.Path = make([]*NamedPathItem, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "/") { + pair := &NamedPathItem{} + pair.Name = k + var err error + pair.Value, err = NewPathItem(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.Path = append(x.Path, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not. 
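The maximum, minimum and multipleOf fields above, and again in NewPrimitivesItems below, are all decoded through the same type switch: the YAML layer may surface a number as any of Go's integer or float types, and each is folded into the float64 proto field. A standalone sketch of that coercion as a helper (the helper name is an assumption for illustration; the generated file inlines the switch each time):

package main

import "fmt"

// toFloat64 is an assumed helper; the vendored file repeats this exact
// type switch wherever maximum, minimum or multipleOf is decoded.
func toFloat64(v interface{}) (float64, bool) {
	switch n := v.(type) {
	case float64:
		return n, true
	case float32:
		return float64(n), true
	case uint64:
		return float64(n), true
	case uint32:
		return float64(n), true
	case int64:
		return float64(n), true
	case int32:
		return float64(n), true
	case int:
		return float64(n), true
	default:
		return 0, false
	}
}

func main() {
	fmt.Println(toFloat64(10))    // 10 true
	fmt.Println(toFloat64(2.5))   // 2.5 true
	fmt.Println(toFloat64("ten")) // 0 false
}

Non-numeric values fall through to the default case, which is why the generated code reports them as errors instead of asserting directly to float64.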
+func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesItems, error) { + errors := make([]error, 0) + x := &PrimitivesItems{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 != nil { + x.CollectionFormat, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.Maximum = v6 + case float32: + x.Maximum = float64(v6) + case uint64: + x.Maximum = float64(v6) + case uint32: + x.Maximum = float64(v6) + case int64: + x.Maximum = float64(v6) + case int32: + x.Maximum = float64(v6) + case int: + x.Maximum = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + 
if v7 != nil { + x.ExclusiveMaximum, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") + if v8 != nil { + switch v8 := v8.(type) { + case float64: + x.Minimum = v8 + case float32: + x.Minimum = float64(v8) + case uint64: + x.Minimum = float64(v8) + case uint32: + x.Minimum = float64(v8) + case int64: + x.Minimum = float64(v8) + case int32: + x.Minimum = float64(v8) + case int: + x.Minimum = float64(v8) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = v9.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := v10.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = v12.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := v13.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = v15.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v16.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + switch v17 := v17.(type) { + case float64: + x.MultipleOf = v17 + case float32: + x.MultipleOf = float64(v17) + case uint64: + x.MultipleOf = float64(v17) + case uint32: + 
x.MultipleOf = float64(v17) + case int64: + x.MultipleOf = float64(v17) + case int32: + x.MultipleOf = float64(v17) + case int: + x.MultipleOf = float64(v17) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 18; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewProperties creates an object of type Properties if possible, returning an error if not. +func NewProperties(in interface{}, context *compiler.Context) (*Properties, error) { + errors := make([]error, 0) + x := &Properties{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not. 
+func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*QueryParameterSubSchema, error) { + errors := make([]error, 0) + x := &QueryParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [query] + if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 5; + v5 := compiler.MapValueForKey(m, "allowEmptyValue") + if v5 != nil { + x.AllowEmptyValue, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 7; + v7 := compiler.MapValueForKey(m, "format") + if v7 != nil { + x.Format, ok = v7.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + 
} + // PrimitivesItems items = 8; + v8 := compiler.MapValueForKey(m, "items") + if v8 != nil { + var err error + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 9; + v9 := compiler.MapValueForKey(m, "collectionFormat") + if v9 != nil { + x.CollectionFormat, ok = v9.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes multi] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 10; + v10 := compiler.MapValueForKey(m, "default") + if v10 != nil { + var err error + x.Default, err = NewAny(v10, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + switch v11 := v11.(type) { + case float64: + x.Maximum = v11 + case float32: + x.Maximum = float64(v11) + case uint64: + x.Maximum = float64(v11) + case uint32: + x.Maximum = float64(v11) + case int64: + x.Maximum = float64(v11) + case int32: + x.Maximum = float64(v11) + case int: + x.Maximum = float64(v11) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = v12.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + switch v13 := v13.(type) { + case float64: + x.Minimum = v13 + case float32: + x.Minimum = float64(v13) + case uint64: + x.Minimum = float64(v13) + case uint32: + x.Minimum = float64(v13) + case int64: + x.Minimum = float64(v13) + case int32: + x.Minimum = float64(v13) + case int: + x.Minimum = float64(v13) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = v14.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := v16.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 17; 
+ v17 := compiler.MapValueForKey(m, "pattern") + if v17 != nil { + x.Pattern, ok = v17.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := v19.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = v20.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 21; + v21 := compiler.MapValueForKey(m, "enum") + if v21 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v21.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 22; + v22 := compiler.MapValueForKey(m, "multipleOf") + if v22 != nil { + switch v22 := v22.(type) { + case float64: + x.MultipleOf = v22 + case float32: + x.MultipleOf = float64(v22) + case uint64: + x.MultipleOf = float64(v22) + case uint32: + x.MultipleOf = float64(v22) + case int64: + x.MultipleOf = float64(v22) + case int32: + x.MultipleOf = float64(v22) + case int: + x.MultipleOf = float64(v22) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 23; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponse creates an object of type Response if possible, returning an error if not. 
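Every constructor in this file, including NewResponse below, ends with the same scan over the unpacked map: any key with the "x-" prefix is kept as a NamedAny vendor extension rather than rejected as an unknown property (pattern0 whitelists those keys in the allowed-key check as well). A minimal standalone sketch of that collection step, with a simplified type in place of NamedAny/Any and without the YAML re-marshalling and extension handlers (both simplifications are assumptions for brevity):

package main

import (
	"fmt"
	"strings"
)

// namedAny is a simplified stand-in for the generated NamedAny/Any pair.
type namedAny struct {
	Name  string
	Value interface{}
}

// collectExtensions keeps every "x-" key as a vendor extension instead of
// treating it as an invalid property.
func collectExtensions(m map[string]interface{}) []namedAny {
	out := []namedAny{}
	for k, v := range m {
		if strings.HasPrefix(k, "x-") {
			out = append(out, namedAny{Name: k, Value: v})
		}
	}
	return out
}

func main() {
	doc := map[string]interface{}{
		"description": "ok",
		"x-internal":  true,
	}
	fmt.Println(collectExtensions(doc))
}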
+func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { + errors := make([]error, 0) + x := &Response{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"description"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "examples", "headers", "schema"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SchemaItem schema = 2; + v2 := compiler.MapValueForKey(m, "schema") + if v2 != nil { + var err error + x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", context)) + if err != nil { + errors = append(errors, err) + } + } + // Headers headers = 3; + v3 := compiler.MapValueForKey(m, "headers") + if v3 != nil { + var err error + x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", context)) + if err != nil { + errors = append(errors, err) + } + } + // Examples examples = 4; + v4 := compiler.MapValueForKey(m, "examples") + if v4 != nil { + var err error + x.Examples, err = NewExamples(v4, compiler.NewContext("examples", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 5; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not. 
+func NewResponseDefinitions(in interface{}, context *compiler.Context) (*ResponseDefinitions, error) { + errors := make([]error, 0) + x := &ResponseDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedResponse additional_properties = 1; + // MAP: Response + x.AdditionalProperties = make([]*NamedResponse, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedResponse{} + pair.Name = k + var err error + pair.Value, err = NewResponse(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not. +func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue, error) { + errors := make([]error, 0) + x := &ResponseValue{} + matched := false + // Response response = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewResponse(m, compiler.NewContext("response", context)) + if matchingError == nil { + x.Oneof = &ResponseValue_Response{Response: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // JsonReference json_reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) + if matchingError == nil { + x.Oneof = &ResponseValue_JsonReference{JsonReference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponses creates an object of type Responses if possible, returning an error if not. 
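NewResponses below sorts the keys of the responses map into three buckets: names matching the status-code pattern (pattern2, ^([0-9]{3})$|^(default)$) become response entries, "x-" names become vendor extensions, and anything else is reported through the invalid-key check. A standalone sketch of that bucketing, with hypothetical names and plain strings standing in for the real message types:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// statusPattern mirrors pattern2 from the generated file:
// a three-digit status code or the literal "default".
var statusPattern = regexp.MustCompile(`^([0-9]{3})$|^(default)$`)

// bucket sorts response-map keys the way NewResponses does: status codes,
// vendor extensions, and everything else (reported as invalid by the real code).
func bucket(keys []string) (codes, extensions, invalid []string) {
	for _, k := range keys {
		switch {
		case statusPattern.MatchString(k):
			codes = append(codes, k)
		case strings.HasPrefix(k, "x-"):
			extensions = append(extensions, k)
		default:
			invalid = append(invalid, k)
		}
	}
	return
}

func main() {
	fmt.Println(bucket([]string{"200", "default", "x-rate-limit", "30x"}))
}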
+func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) { + errors := make([]error, 0) + x := &Responses{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []*regexp.Regexp{pattern2, pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedResponseValue response_code = 1; + // MAP: ResponseValue ^([0-9]{3})$|^(default)$ + x.ResponseCode = make([]*NamedResponseValue, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if pattern2.MatchString(k) { + pair := &NamedResponseValue{} + pair.Name = k + var err error + pair.Value, err = NewResponseValue(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.ResponseCode = append(x.ResponseCode, pair) + } + } + } + // repeated NamedAny vendor_extension = 2; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchema creates an object of type Schema if possible, returning an error if not. 
+func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { + errors := make([]error, 0) + x := &Schema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 3; + v3 := compiler.MapValueForKey(m, "title") + if v3 != nil { + x.Title, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float multiple_of = 6; + v6 := compiler.MapValueForKey(m, "multipleOf") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.MultipleOf = v6 + case float32: + x.MultipleOf = float64(v6) + case uint64: + x.MultipleOf = float64(v6) + case uint32: + x.MultipleOf = float64(v6) + case int64: + x.MultipleOf = float64(v6) + case int32: + x.MultipleOf = float64(v6) + case int: + x.MultipleOf = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float maximum = 7; + v7 := compiler.MapValueForKey(m, "maximum") + if v7 != nil { + switch v7 := v7.(type) { + case float64: + x.Maximum = v7 + case float32: + x.Maximum = float64(v7) + case uint64: + x.Maximum = float64(v7) + case uint32: + x.Maximum = float64(v7) + case int64: + x.Maximum = float64(v7) + case int32: + x.Maximum = float64(v7) + case int: + x.Maximum = float64(v7) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool 
exclusive_maximum = 8; + v8 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v8 != nil { + x.ExclusiveMaximum, ok = v8.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 9; + v9 := compiler.MapValueForKey(m, "minimum") + if v9 != nil { + switch v9 := v9.(type) { + case float64: + x.Minimum = v9 + case float32: + x.Minimum = float64(v9) + case uint64: + x.Minimum = float64(v9) + case uint32: + x.Minimum = float64(v9) + case int64: + x.Minimum = float64(v9) + case int32: + x.Minimum = float64(v9) + case int: + x.Minimum = float64(v9) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 10; + v10 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v10 != nil { + x.ExclusiveMinimum, ok = v10.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 11; + v11 := compiler.MapValueForKey(m, "maxLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 12; + v12 := compiler.MapValueForKey(m, "minLength") + if v12 != nil { + t, ok := v12.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 13; + v13 := compiler.MapValueForKey(m, "pattern") + if v13 != nil { + x.Pattern, ok = v13.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 14; + v14 := compiler.MapValueForKey(m, "maxItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 15; + v15 := compiler.MapValueForKey(m, "minItems") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 16; + v16 := compiler.MapValueForKey(m, "uniqueItems") + if v16 != nil { + x.UniqueItems, ok = v16.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_properties = 17; + v17 := compiler.MapValueForKey(m, "maxProperties") + if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxProperties = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxProperties: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_properties = 18; + v18 := compiler.MapValueForKey(m, "minProperties") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinProperties = int64(t) + } else { + message := fmt.Sprintf("has 
unexpected value for minProperties: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string required = 19; + v19 := compiler.MapValueForKey(m, "required") + if v19 != nil { + v, ok := v19.([]interface{}) + if ok { + x.Required = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // AdditionalPropertiesItem additional_properties = 21; + v21 := compiler.MapValueForKey(m, "additionalProperties") + if v21 != nil { + var err error + x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", context)) + if err != nil { + errors = append(errors, err) + } + } + // TypeItem type = 22; + v22 := compiler.MapValueForKey(m, "type") + if v22 != nil { + var err error + x.Type, err = NewTypeItem(v22, compiler.NewContext("type", context)) + if err != nil { + errors = append(errors, err) + } + } + // ItemsItem items = 23; + v23 := compiler.MapValueForKey(m, "items") + if v23 != nil { + var err error + x.Items, err = NewItemsItem(v23, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Schema all_of = 24; + v24 := compiler.MapValueForKey(m, "allOf") + if v24 != nil { + // repeated Schema + x.AllOf = make([]*Schema, 0) + a, ok := v24.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSchema(item, compiler.NewContext("allOf", context)) + if err != nil { + errors = append(errors, err) + } + x.AllOf = append(x.AllOf, y) + } + } + } + // Properties properties = 25; + v25 := compiler.MapValueForKey(m, "properties") + if v25 != nil { + var err error + x.Properties, err = NewProperties(v25, compiler.NewContext("properties", context)) + if err != nil { + errors = append(errors, err) + } + } + // string discriminator = 26; + v26 := compiler.MapValueForKey(m, "discriminator") + if v26 != nil { + x.Discriminator, ok = v26.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for discriminator: %+v (%T)", v26, v26) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool read_only = 27; + v27 := compiler.MapValueForKey(m, "readOnly") + if v27 != nil { + x.ReadOnly, ok = v27.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v27, v27) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Xml xml = 28; + v28 := compiler.MapValueForKey(m, "xml") + if v28 != nil { + var err error + x.Xml, err = NewXml(v28, compiler.NewContext("xml", context)) + if err != nil { + errors = append(errors, err) + } + } + // ExternalDocs external_docs = 29; + v29 := compiler.MapValueForKey(m, "externalDocs") + if v29 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 30; + v30 := compiler.MapValueForKey(m, "example") + if v30 != nil { + var err error + x.Example, err = NewAny(v30, compiler.NewContext("example", context)) + 
if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 31; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not. +func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, error) { + errors := make([]error, 0) + x := &SchemaItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) + if matchingError == nil { + x.Oneof = &SchemaItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // FileSchema file_schema = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", context)) + if matchingError == nil { + x.Oneof = &SchemaItem_FileSchema{FileSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not. +func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*SecurityDefinitions, error) { + errors := make([]error, 0) + x := &SecurityDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSecurityDefinitionsItem additional_properties = 1; + // MAP: SecurityDefinitionsItem + x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedSecurityDefinitionsItem{} + pair.Name = k + var err error + pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not. 
+func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*SecurityDefinitionsItem, error) { + errors := make([]error, 0) + x := &SecurityDefinitionsItem{} + matched := false + // BasicAuthenticationSecurity basic_authentication_security = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // ApiKeySecurity api_key_security = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2ImplicitSecurity oauth2_implicit_security = 3; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2PasswordSecurity oauth2_password_security = 4; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2ApplicationSecurity oauth2_application_security = 5; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2AccessCodeSecurity oauth2_access_code_security = 6; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not. 
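// A minimal, self-contained sketch (hypothetical toy types, not from this
// package) of the oneof-matching idiom used by NewSchemaItem and
// NewSecurityDefinitionsItem above: every candidate constructor is tried
// against the same input, mismatch errors are accumulated, and the whole
// error list is discarded as soon as any candidate matches.

package main

import (
	"errors"
	"fmt"
)

func newCircle(m map[string]interface{}) (string, error) {
	if _, ok := m["radius"]; ok {
		return "circle", nil
	}
	return "", errors.New("not a circle")
}

func newSquare(m map[string]interface{}) (string, error) {
	if _, ok := m["side"]; ok {
		return "square", nil
	}
	return "", errors.New("not a square")
}

func newShape(m map[string]interface{}) (string, error) {
	errs := []error{}
	matched := false
	result := ""
	for _, ctor := range []func(map[string]interface{}) (string, error){newCircle, newSquare} {
		if r, err := ctor(m); err == nil {
			result, matched = r, true
		} else {
			errs = append(errs, err)
		}
	}
	if matched {
		// a successful match means the earlier "wrong subtype" errors are irrelevant
		errs = nil
	}
	if len(errs) > 0 {
		return "", errs[0]
	}
	return result, nil
}

func main() {
	fmt.Println(newShape(map[string]interface{}{"side": 2.0}))
}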
+func NewSecurityRequirement(in interface{}, context *compiler.Context) (*SecurityRequirement, error) { + errors := make([]error, 0) + x := &SecurityRequirement{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedStringArray additional_properties = 1; + // MAP: StringArray + x.AdditionalProperties = make([]*NamedStringArray, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedStringArray{} + pair.Name = k + var err error + pair.Value, err = NewStringArray(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewStringArray creates an object of type StringArray if possible, returning an error if not. +func NewStringArray(in interface{}, context *compiler.Context) (*StringArray, error) { + errors := make([]error, 0) + x := &StringArray{} + a, ok := in.([]interface{}) + if !ok { + message := fmt.Sprintf("has unexpected value for StringArray: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + x.Value = make([]string, 0) + for _, s := range a { + x.Value = append(x.Value, s.(string)) + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewTag creates an object of type Tag if possible, returning an error if not. +func NewTag(in interface{}, context *compiler.Context) (*Tag, error) { + errors := make([]error, 0) + x := &Tag{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "externalDocs", "name"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 3; + v3 := compiler.MapValueForKey(m, "externalDocs") + if v3 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 4; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := 
range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewTypeItem creates an object of type TypeItem if possible, returning an error if not. +func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) { + errors := make([]error, 0) + x := &TypeItem{} + switch in := in.(type) { + case string: + x.Value = make([]string, 0) + x.Value = append(x.Value, in) + case []interface{}: + x.Value = make([]string, 0) + for _, v := range in { + value, ok := v.(string) + if ok { + x.Value = append(x.Value, value) + } else { + message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", value, value) + errors = append(errors, compiler.NewError(context, message)) + } + } + default: + message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not. +func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExtension, error) { + errors := make([]error, 0) + x := &VendorExtension{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewXml creates an object of type Xml if possible, returning an error if not. 
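// A minimal sketch of the "string or list of strings" normalization performed
// by NewTypeItem above (OpenAPI's "type" field may be either shape). The
// helper name below is hypothetical; only the type switch mirrors the
// generated code.

package main

import "fmt"

func normalizeType(in interface{}) ([]string, error) {
	switch v := in.(type) {
	case string:
		return []string{v}, nil
	case []interface{}:
		out := make([]string, 0, len(v))
		for _, e := range v {
			s, ok := e.(string)
			if !ok {
				return nil, fmt.Errorf("unexpected value for string array element: %+v (%T)", e, e)
			}
			out = append(out, s)
		}
		return out, nil
	default:
		return nil, fmt.Errorf("unexpected value for string array: %+v (%T)", in, in)
	}
}

func main() {
	fmt.Println(normalizeType("string"))
	fmt.Println(normalizeType([]interface{}{"string", "null"}))
}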
+func NewXml(in interface{}, context *compiler.Context) (*Xml, error) { + errors := make([]error, 0) + x := &Xml{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string namespace = 2; + v2 := compiler.MapValueForKey(m, "namespace") + if v2 != nil { + x.Namespace, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for namespace: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string prefix = 3; + v3 := compiler.MapValueForKey(m, "prefix") + if v3 != nil { + x.Prefix, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for prefix: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool attribute = 4; + v4 := compiler.MapValueForKey(m, "attribute") + if v4 != nil { + x.Attribute, ok = v4.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for attribute: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool wrapped = 5; + v5 := compiler.MapValueForKey(m, "wrapped") + if v5 != nil { + x.Wrapped, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for wrapped: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside AdditionalPropertiesItem objects. +func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema) + if ok { + _, err := p.Schema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Any objects. 
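// A minimal sketch of the vendor-extension handling used throughout the
// constructors above (e.g. NewTag, NewXml): every map key with an "x-" prefix
// is collected as a name/value pair, and the raw value is re-marshalled to
// YAML so it can be carried along verbatim. This standalone version uses
// gopkg.in/yaml.v2 directly and a hypothetical local type in place of the
// generated NamedAny/Any pair.

package main

import (
	"fmt"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

type namedAny struct {
	Name string
	Yaml string
}

func collectVendorExtensions(m yaml.MapSlice) []namedAny {
	out := []namedAny{}
	for _, item := range m {
		k, ok := item.Key.(string)
		if !ok || !strings.HasPrefix(k, "x-") {
			continue
		}
		b, err := yaml.Marshal(item.Value)
		if err != nil {
			// in the generated code this error would be accumulated, not dropped
			continue
		}
		out = append(out, namedAny{Name: k, Yaml: string(b)})
	}
	return out
}

func main() {
	doc := yaml.MapSlice{
		{Key: "name", Value: "pet"},
		{Key: "x-internal-id", Value: 42},
	}
	fmt.Printf("%+v\n", collectVendorExtensions(doc))
}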
+func (m *Any) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ApiKeySecurity objects. +func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects. +func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside BodyParameter objects. +func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Contact objects. +func (m *Contact) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Default objects. +func (m *Default) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Definitions objects. +func (m *Definitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Document objects. 
+func (m *Document) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Info != nil { + _, err := m.Info.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Paths != nil { + _, err := m.Paths.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Definitions != nil { + _, err := m.Definitions.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Parameters != nil { + _, err := m.Parameters.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.SecurityDefinitions != nil { + _, err := m.SecurityDefinitions.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Tags { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Examples objects. +func (m *Examples) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ExternalDocs objects. +func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside FileSchema objects. +func (m *FileSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside FormDataParameterSubSchema objects. 
+func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Header objects. +func (m *Header) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside HeaderParameterSubSchema objects. +func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Headers objects. +func (m *Headers) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Info objects. +func (m *Info) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Contact != nil { + _, err := m.Contact.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.License != nil { + _, err := m.License.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ItemsItem objects. 
+func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.Schema { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside JsonReference objects. +func (m *JsonReference) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewJsonReference(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside License objects. +func (m *License) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedAny objects. +func (m *NamedAny) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedHeader objects. +func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedParameter objects. +func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedPathItem objects. +func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedResponse objects. +func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedResponseValue objects. +func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSchema objects. 
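// A minimal sketch of the $ref replacement idiom used by the JsonReference,
// PathItem, and Schema ResolveReferences methods above: when a node carries a
// non-empty XRef, the referenced content is loaded, a replacement node is
// built, the current node is overwritten in place (*m = *replacement), and
// resolution recurses into the replacement. The loader and node type are
// hypothetical stand-ins for compiler.ReadInfoForRef and the generated structs.

package main

import "fmt"

type node struct {
	XRef     string
	Value    string
	Children []*node
}

// resolve walks the tree, splicing in referenced content wherever XRef is set.
func resolve(n *node, load func(ref string) *node) error {
	if n.XRef != "" {
		replacement := load(n.XRef)
		if replacement == nil {
			return fmt.Errorf("unresolvable reference %q", n.XRef)
		}
		*n = *replacement       // splice the referenced content in place
		return resolve(n, load) // the replacement may itself contain refs
	}
	for _, c := range n.Children {
		if err := resolve(c, load); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	refs := map[string]*node{"#/defs/a": {Value: "resolved"}}
	root := &node{Children: []*node{{XRef: "#/defs/a"}}}
	_ = resolve(root, func(ref string) *node { return refs[ref] })
	fmt.Println(root.Children[0].Value) // prints "resolved"
}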
+func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects. +func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedString objects. +func (m *NamedString) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedStringArray objects. +func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NonBodyParameter objects. +func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema) + if ok { + _, err := p.HeaderParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema) + if ok { + _, err := p.FormDataParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema) + if ok { + _, err := p.QueryParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema) + if ok { + _, err := p.PathParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects. +func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects. +func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects. 
+func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects. +func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2Scopes objects. +func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Operation objects. +func (m *Operation) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Parameter objects. +func (m *Parameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*Parameter_BodyParameter) + if ok { + _, err := p.BodyParameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*Parameter_NonBodyParameter) + if ok { + _, err := p.NonBodyParameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ParameterDefinitions objects. +func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ParametersItem objects. 
+func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ParametersItem_Parameter) + if ok { + _, err := p.Parameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ParametersItem_JsonReference) + if ok { + info, err := p.JsonReference.ResolveReferences(root) + if err != nil { + return nil, err + } else if info != nil { + n, err := NewParametersItem(info, nil) + if err != nil { + return nil, err + } else if n != nil { + *m = *n + return nil, nil + } + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PathItem objects. +func (m *PathItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewPathItem(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + if m.Get != nil { + _, err := m.Get.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Put != nil { + _, err := m.Put.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Post != nil { + _, err := m.Post.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Delete != nil { + _, err := m.Delete.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Options != nil { + _, err := m.Options.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Head != nil { + _, err := m.Head.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Patch != nil { + _, err := m.Patch.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PathParameterSubSchema objects. +func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Paths objects. 
+func (m *Paths) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.Path { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PrimitivesItems objects. +func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Properties objects. +func (m *Properties) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside QueryParameterSubSchema objects. +func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Response objects. +func (m *Response) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Headers != nil { + _, err := m.Headers.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Examples != nil { + _, err := m.Examples.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ResponseDefinitions objects. 
+func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ResponseValue objects. +func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ResponseValue_Response) + if ok { + _, err := p.Response.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ResponseValue_JsonReference) + if ok { + info, err := p.JsonReference.ResolveReferences(root) + if err != nil { + return nil, err + } else if info != nil { + n, err := NewResponseValue(info, nil) + if err != nil { + return nil, err + } else if n != nil { + *m = *n + return nil, nil + } + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Responses objects. +func (m *Responses) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.ResponseCode { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Schema objects. +func (m *Schema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewSchema(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.AdditionalProperties != nil { + _, err := m.AdditionalProperties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Type != nil { + _, err := m.Type.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.AllOf { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Properties != nil { + _, err := m.Properties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Xml != nil { + _, err := m.Xml.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = 
append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SchemaItem objects. +func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SchemaItem_Schema) + if ok { + _, err := p.Schema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SchemaItem_FileSchema) + if ok { + _, err := p.FileSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityDefinitions objects. +func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityDefinitionsItem objects. +func (m *SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity) + if ok { + _, err := p.BasicAuthenticationSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_ApiKeySecurity) + if ok { + _, err := p.ApiKeySecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ImplicitSecurity) + if ok { + _, err := p.Oauth2ImplicitSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2PasswordSecurity) + if ok { + _, err := p.Oauth2PasswordSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ApplicationSecurity) + if ok { + _, err := p.Oauth2ApplicationSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) + if ok { + _, err := p.Oauth2AccessCodeSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityRequirement objects. +func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside StringArray objects. +func (m *StringArray) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Tag objects. 
+func (m *Tag) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside TypeItem objects. +func (m *TypeItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside VendorExtension objects. +func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Xml objects. +func (m *Xml) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export. +func (m *AdditionalPropertiesItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // AdditionalPropertiesItem + // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return v1.Boolean + } + return nil +} + +// ToRawInfo returns a description of Any suitable for JSON or YAML export. +func (m *Any) ToRawInfo() interface{} { + var err error + var info1 []yaml.MapSlice + err = yaml.Unmarshal([]byte(m.Yaml), &info1) + if err == nil { + return info1 + } + var info2 yaml.MapSlice + err = yaml.Unmarshal([]byte(m.Yaml), &info2) + if err == nil { + return info2 + } + var info3 interface{} + err = yaml.Unmarshal([]byte(m.Yaml), &info3) + if err == nil { + return info3 + } + return nil +} + +// ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export. +func (m *ApiKeySecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export. 
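// A minimal sketch of the progressive decoding used by Any.ToRawInfo above:
// the stored YAML text is unmarshalled first as a list of maps, then as a
// single map, and finally as an arbitrary value, returning the first shape
// that parses. Standalone version using gopkg.in/yaml.v2; the function name
// is hypothetical.

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func decodeAny(text string) interface{} {
	var asList []yaml.MapSlice
	if err := yaml.Unmarshal([]byte(text), &asList); err == nil {
		return asList
	}
	var asMap yaml.MapSlice
	if err := yaml.Unmarshal([]byte(text), &asMap); err == nil {
		return asMap
	}
	var asAny interface{}
	if err := yaml.Unmarshal([]byte(text), &asAny); err == nil {
		return asAny
	}
	return nil
}

func main() {
	fmt.Printf("%T\n", decodeAny("- a: 1\n- b: 2\n")) // list of maps
	fmt.Printf("%T\n", decodeAny("a: 1\n"))           // single map
	fmt.Printf("%T\n", decodeAny("42\n"))             // scalar
}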
+func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export. +func (m *BodyParameter) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Required != false { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.Schema != nil { + info = append(info, yaml.MapItem{"schema", m.Schema.ToRawInfo()}) + } + // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Contact suitable for JSON or YAML export. +func (m *Contact) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Url != "" { + info = append(info, yaml.MapItem{"url", m.Url}) + } + if m.Email != "" { + info = append(info, yaml.MapItem{"email", m.Email}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Default suitable for JSON or YAML export. +func (m *Default) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:false Description:} + return info +} + +// ToRawInfo returns a description of Definitions suitable for JSON or YAML export. +func (m *Definitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Document suitable for JSON or YAML export. 
+func (m *Document) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Swagger != "" { + info = append(info, yaml.MapItem{"swagger", m.Swagger}) + } + if m.Info != nil { + info = append(info, yaml.MapItem{"info", m.Info.ToRawInfo()}) + } + // &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Host != "" { + info = append(info, yaml.MapItem{"host", m.Host}) + } + if m.BasePath != "" { + info = append(info, yaml.MapItem{"basePath", m.BasePath}) + } + if len(m.Schemes) != 0 { + info = append(info, yaml.MapItem{"schemes", m.Schemes}) + } + if len(m.Consumes) != 0 { + info = append(info, yaml.MapItem{"consumes", m.Consumes}) + } + if len(m.Produces) != 0 { + info = append(info, yaml.MapItem{"produces", m.Produces}) + } + if m.Paths != nil { + info = append(info, yaml.MapItem{"paths", m.Paths.ToRawInfo()}) + } + // &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Definitions != nil { + info = append(info, yaml.MapItem{"definitions", m.Definitions.ToRawInfo()}) + } + // &{Name:definitions Type:Definitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Parameters != nil { + info = append(info, yaml.MapItem{"parameters", m.Parameters.ToRawInfo()}) + } + // &{Name:parameters Type:ParameterDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Responses != nil { + info = append(info, yaml.MapItem{"responses", m.Responses.ToRawInfo()}) + } + // &{Name:responses Type:ResponseDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Security) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Security { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"security", items}) + } + // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.SecurityDefinitions != nil { + info = append(info, yaml.MapItem{"securityDefinitions", m.SecurityDefinitions.ToRawInfo()}) + } + // &{Name:securityDefinitions Type:SecurityDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Tags) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Tags { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"tags", items}) + } + // &{Name:tags Type:Tag StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Examples suitable for JSON or YAML export. 
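// A minimal sketch of the ToRawInfo pattern above: each generated type is
// re-serialized into a yaml.MapSlice so that key order is preserved and only
// fields differing from their zero value are emitted. The struct below is a
// hypothetical stand-in for a generated message such as Contact.

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type contact struct {
	Name  string
	URL   string
	Email string
}

func (c *contact) toRawInfo() yaml.MapSlice {
	info := yaml.MapSlice{}
	if c.Name != "" {
		info = append(info, yaml.MapItem{Key: "name", Value: c.Name})
	}
	if c.URL != "" {
		info = append(info, yaml.MapItem{Key: "url", Value: c.URL})
	}
	if c.Email != "" {
		info = append(info, yaml.MapItem{Key: "email", Value: c.Email})
	}
	return info
}

func main() {
	b, _ := yaml.Marshal((&contact{Name: "API team", URL: "https://example.com"}).toRawInfo())
	fmt.Print(string(b)) // email is omitted because it is empty
}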
+func (m *Examples) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export. +func (m *ExternalDocs) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Url != "" { + info = append(info, yaml.MapItem{"url", m.Url}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of FileSchema suitable for JSON or YAML export. +func (m *FileSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Title != "" { + info = append(info, yaml.MapItem{"title", m.Title}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Required) != 0 { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.ReadOnly != false { + info = append(info, yaml.MapItem{"readOnly", m.ReadOnly}) + } + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Example != nil { + info = append(info, yaml.MapItem{"example", m.Example.ToRawInfo()}) + } + // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export. 
+func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Required != false { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.AllowEmptyValue != false { + info = append(info, yaml.MapItem{"allowEmptyValue", m.AllowEmptyValue}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Header suitable for JSON or YAML export. 
+func (m *Header) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export. 
+func (m *HeaderParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Required != false { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Headers suitable for JSON or YAML export. +func (m *Headers) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedHeader StringEnumValues:[] MapType:Header Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Info suitable for JSON or YAML export. 
+func (m *Info) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Title != "" { + info = append(info, yaml.MapItem{"title", m.Title}) + } + if m.Version != "" { + info = append(info, yaml.MapItem{"version", m.Version}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.TermsOfService != "" { + info = append(info, yaml.MapItem{"termsOfService", m.TermsOfService}) + } + if m.Contact != nil { + info = append(info, yaml.MapItem{"contact", m.Contact.ToRawInfo()}) + } + // &{Name:contact Type:Contact StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.License != nil { + info = append(info, yaml.MapItem{"license", m.License.ToRawInfo()}) + } + // &{Name:license Type:License StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export. +func (m *ItemsItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if len(m.Schema) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Schema { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"schema", items}) + } + // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + return info +} + +// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export. +func (m *JsonReference) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.XRef != "" { + info = append(info, yaml.MapItem{"$ref", m.XRef}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + return info +} + +// ToRawInfo returns a description of License suitable for JSON or YAML export. +func (m *License) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Url != "" { + info = append(info, yaml.MapItem{"url", m.Url}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export. +func (m *NamedAny) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export. +func (m *NamedHeader) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export. 
+func (m *NamedParameter) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export. +func (m *NamedPathItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export. +func (m *NamedResponse) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export. +func (m *NamedResponseValue) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export. +func (m *NamedSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export. +func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedString suitable for JSON or YAML export. +func (m *NamedString) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Value != "" { + info = append(info, yaml.MapItem{"value", m.Value}) + } + return info +} + +// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export. +func (m *NamedStringArray) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export. 
+func (m *NonBodyParameter) ToRawInfo() interface{} { + // ONE OF WRAPPER + // NonBodyParameter + // {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetHeaderParameterSubSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:formDataParameterSubSchema Type:FormDataParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetFormDataParameterSubSchema() + if v1 != nil { + return v1.ToRawInfo() + } + // {Name:queryParameterSubSchema Type:QueryParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v2 := m.GetQueryParameterSubSchema() + if v2 != nil { + return v2.ToRawInfo() + } + // {Name:pathParameterSubSchema Type:PathParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v3 := m.GetPathParameterSubSchema() + if v3 != nil { + return v3.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export. +func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Flow != "" { + info = append(info, yaml.MapItem{"flow", m.Flow}) + } + if m.Scopes != nil { + info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.AuthorizationUrl != "" { + info = append(info, yaml.MapItem{"authorizationUrl", m.AuthorizationUrl}) + } + if m.TokenUrl != "" { + info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export. +func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Flow != "" { + info = append(info, yaml.MapItem{"flow", m.Flow}) + } + if m.Scopes != nil { + info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.TokenUrl != "" { + info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export. 
+func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Flow != "" { + info = append(info, yaml.MapItem{"flow", m.Flow}) + } + if m.Scopes != nil { + info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.AuthorizationUrl != "" { + info = append(info, yaml.MapItem{"authorizationUrl", m.AuthorizationUrl}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export. +func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Flow != "" { + info = append(info, yaml.MapItem{"flow", m.Flow}) + } + if m.Scopes != nil { + info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.TokenUrl != "" { + info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export. +func (m *Oauth2Scopes) ToRawInfo() interface{} { + info := yaml.MapSlice{} + // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Operation suitable for JSON or YAML export. 
+func (m *Operation) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if len(m.Tags) != 0 { + info = append(info, yaml.MapItem{"tags", m.Tags}) + } + if m.Summary != "" { + info = append(info, yaml.MapItem{"summary", m.Summary}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.OperationId != "" { + info = append(info, yaml.MapItem{"operationId", m.OperationId}) + } + if len(m.Produces) != 0 { + info = append(info, yaml.MapItem{"produces", m.Produces}) + } + if len(m.Consumes) != 0 { + info = append(info, yaml.MapItem{"consumes", m.Consumes}) + } + if len(m.Parameters) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Parameters { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"parameters", items}) + } + // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} + if m.Responses != nil { + info = append(info, yaml.MapItem{"responses", m.Responses.ToRawInfo()}) + } + // &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Schemes) != 0 { + info = append(info, yaml.MapItem{"schemes", m.Schemes}) + } + if m.Deprecated != false { + info = append(info, yaml.MapItem{"deprecated", m.Deprecated}) + } + if len(m.Security) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Security { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"security", items}) + } + // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Parameter suitable for JSON or YAML export. +func (m *Parameter) ToRawInfo() interface{} { + // ONE OF WRAPPER + // Parameter + // {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetBodyParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:nonBodyParameter Type:NonBodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetNonBodyParameter() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export. +func (m *ParameterDefinitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedParameter StringEnumValues:[] MapType:Parameter Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export. 
+func (m *ParametersItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // ParametersItem + // {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetJsonReference() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of PathItem suitable for JSON or YAML export. +func (m *PathItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.XRef != "" { + info = append(info, yaml.MapItem{"$ref", m.XRef}) + } + if m.Get != nil { + info = append(info, yaml.MapItem{"get", m.Get.ToRawInfo()}) + } + // &{Name:get Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Put != nil { + info = append(info, yaml.MapItem{"put", m.Put.ToRawInfo()}) + } + // &{Name:put Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Post != nil { + info = append(info, yaml.MapItem{"post", m.Post.ToRawInfo()}) + } + // &{Name:post Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Delete != nil { + info = append(info, yaml.MapItem{"delete", m.Delete.ToRawInfo()}) + } + // &{Name:delete Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Options != nil { + info = append(info, yaml.MapItem{"options", m.Options.ToRawInfo()}) + } + // &{Name:options Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Head != nil { + info = append(info, yaml.MapItem{"head", m.Head.ToRawInfo()}) + } + // &{Name:head Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Patch != nil { + info = append(info, yaml.MapItem{"patch", m.Patch.ToRawInfo()}) + } + // &{Name:patch Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Parameters) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Parameters { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"parameters", items}) + } + // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export. 
+func (m *PathParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Required != false { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Paths suitable for JSON or YAML export. +func (m *Paths) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + if m.Path != nil { + for _, item := range m.Path { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:Path Type:NamedPathItem StringEnumValues:[] MapType:PathItem Repeated:true Pattern:^/ Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export. 
+func (m *PrimitivesItems) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Properties suitable for JSON or YAML export. +func (m *Properties) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export. 
+func (m *QueryParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Required != false { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{"in", m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.AllowEmptyValue != false { + info = append(info, yaml.MapItem{"allowEmptyValue", m.AllowEmptyValue}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{"type", m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Response suitable for JSON or YAML export. 
+func (m *Response) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Schema != nil { + info = append(info, yaml.MapItem{"schema", m.Schema.ToRawInfo()}) + } + // &{Name:schema Type:SchemaItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Headers != nil { + info = append(info, yaml.MapItem{"headers", m.Headers.ToRawInfo()}) + } + // &{Name:headers Type:Headers StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Examples != nil { + info = append(info, yaml.MapItem{"examples", m.Examples.ToRawInfo()}) + } + // &{Name:examples Type:Examples StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export. +func (m *ResponseDefinitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedResponse StringEnumValues:[] MapType:Response Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export. +func (m *ResponseValue) ToRawInfo() interface{} { + // ONE OF WRAPPER + // ResponseValue + // {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetResponse() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetJsonReference() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of Responses suitable for JSON or YAML export. +func (m *Responses) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.ResponseCode != nil { + for _, item := range m.ResponseCode { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:ResponseCode Type:NamedResponseValue StringEnumValues:[] MapType:ResponseValue Repeated:true Pattern:^([0-9]{3})$|^(default)$ Implicit:true Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Schema suitable for JSON or YAML export. 
+func (m *Schema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.XRef != "" { + info = append(info, yaml.MapItem{"$ref", m.XRef}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{"format", m.Format}) + } + if m.Title != "" { + info = append(info, yaml.MapItem{"title", m.Title}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + } + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{"maximum", m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{"minimum", m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{"minLength", m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{"pattern", m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{"minItems", m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + } + if m.MaxProperties != 0 { + info = append(info, yaml.MapItem{"maxProperties", m.MaxProperties}) + } + if m.MinProperties != 0 { + info = append(info, yaml.MapItem{"minProperties", m.MinProperties}) + } + if len(m.Required) != 0 { + info = append(info, yaml.MapItem{"required", m.Required}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"enum", items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.AdditionalProperties != nil { + info = append(info, yaml.MapItem{"additionalProperties", m.AdditionalProperties.ToRawInfo()}) + } + // &{Name:additionalProperties Type:AdditionalPropertiesItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Type != nil { + if len(m.Type.Value) == 1 { + info = append(info, yaml.MapItem{"type", m.Type.Value[0]}) + } else { + info = append(info, yaml.MapItem{"type", m.Type.Value}) + } + } + // &{Name:type Type:TypeItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Items != nil { + items := make([]interface{}, 0) + for _, item := range m.Items.Schema { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"items", items[0]}) + } + // &{Name:items Type:ItemsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.AllOf) != 0 { + items := make([]interface{}, 0) + for _, item := range m.AllOf { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{"allOf", items}) + } + // &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.Properties != nil { + info = append(info, yaml.MapItem{"properties", m.Properties.ToRawInfo()}) + } 
+ // &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Discriminator != "" { + info = append(info, yaml.MapItem{"discriminator", m.Discriminator}) + } + if m.ReadOnly != false { + info = append(info, yaml.MapItem{"readOnly", m.ReadOnly}) + } + if m.Xml != nil { + info = append(info, yaml.MapItem{"xml", m.Xml.ToRawInfo()}) + } + // &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Example != nil { + info = append(info, yaml.MapItem{"example", m.Example.ToRawInfo()}) + } + // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export. +func (m *SchemaItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // SchemaItem + // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetFileSchema() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export. +func (m *SecurityDefinitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export. 
+func (m *SecurityDefinitionsItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // SecurityDefinitionsItem + // {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetBasicAuthenticationSecurity() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetApiKeySecurity() + if v1 != nil { + return v1.ToRawInfo() + } + // {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v2 := m.GetOauth2ImplicitSecurity() + if v2 != nil { + return v2.ToRawInfo() + } + // {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v3 := m.GetOauth2PasswordSecurity() + if v3 != nil { + return v3.ToRawInfo() + } + // {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v4 := m.GetOauth2ApplicationSecurity() + if v4 != nil { + return v4.ToRawInfo() + } + // {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v5 := m.GetOauth2AccessCodeSecurity() + if v5 != nil { + return v5.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. +func (m *SecurityRequirement) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of StringArray suitable for JSON or YAML export. +func (m *StringArray) ToRawInfo() interface{} { + return m.Value +} + +// ToRawInfo returns a description of Tag suitable for JSON or YAML export. +func (m *Tag) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{"description", m.Description}) + } + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export. +func (m *TypeItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if len(m.Value) != 0 { + info = append(info, yaml.MapItem{"value", m.Value}) + } + return info +} + +// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export. 
+func (m *VendorExtension) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Xml suitable for JSON or YAML export. +func (m *Xml) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m.Name != "" { + info = append(info, yaml.MapItem{"name", m.Name}) + } + if m.Namespace != "" { + info = append(info, yaml.MapItem{"namespace", m.Namespace}) + } + if m.Prefix != "" { + info = append(info, yaml.MapItem{"prefix", m.Prefix}) + } + if m.Attribute != false { + info = append(info, yaml.MapItem{"attribute", m.Attribute}) + } + if m.Wrapped != false { + info = append(info, yaml.MapItem{"wrapped", m.Wrapped}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +var ( + pattern0 = regexp.MustCompile("^x-") + pattern1 = regexp.MustCompile("^/") + pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$") +) diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go new file mode 100644 index 00000000000..37da7df256f --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go @@ -0,0 +1,4456 @@ +// Code generated by protoc-gen-go. +// source: OpenAPIv2/OpenAPIv2.proto +// DO NOT EDIT! + +/* +Package openapi_v2 is a generated protocol buffer package. + +It is generated from these files: + OpenAPIv2/OpenAPIv2.proto + +It has these top-level messages: + AdditionalPropertiesItem + Any + ApiKeySecurity + BasicAuthenticationSecurity + BodyParameter + Contact + Default + Definitions + Document + Examples + ExternalDocs + FileSchema + FormDataParameterSubSchema + Header + HeaderParameterSubSchema + Headers + Info + ItemsItem + JsonReference + License + NamedAny + NamedHeader + NamedParameter + NamedPathItem + NamedResponse + NamedResponseValue + NamedSchema + NamedSecurityDefinitionsItem + NamedString + NamedStringArray + NonBodyParameter + Oauth2AccessCodeSecurity + Oauth2ApplicationSecurity + Oauth2ImplicitSecurity + Oauth2PasswordSecurity + Oauth2Scopes + Operation + Parameter + ParameterDefinitions + ParametersItem + PathItem + PathParameterSubSchema + Paths + PrimitivesItems + Properties + QueryParameterSubSchema + Response + ResponseDefinitions + ResponseValue + Responses + Schema + SchemaItem + SecurityDefinitions + SecurityDefinitionsItem + SecurityRequirement + StringArray + Tag + TypeItem + VendorExtension + Xml +*/ +package openapi_v2 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type AdditionalPropertiesItem struct { + // Types that are valid to be assigned to Oneof: + // *AdditionalPropertiesItem_Schema + // *AdditionalPropertiesItem_Boolean + Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *AdditionalPropertiesItem) Reset() { *m = AdditionalPropertiesItem{} } +func (m *AdditionalPropertiesItem) String() string { return proto.CompactTextString(m) } +func (*AdditionalPropertiesItem) ProtoMessage() {} +func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isAdditionalPropertiesItem_Oneof interface { + isAdditionalPropertiesItem_Oneof() +} + +type AdditionalPropertiesItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` +} +type AdditionalPropertiesItem_Boolean struct { + Boolean bool `protobuf:"varint,2,opt,name=boolean,oneof"` +} + +func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} +func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} + +func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *AdditionalPropertiesItem) GetSchema() *Schema { + if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Schema); ok { + return x.Schema + } + return nil +} + +func (m *AdditionalPropertiesItem) GetBoolean() bool { + if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return x.Boolean + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*AdditionalPropertiesItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AdditionalPropertiesItem_OneofMarshaler, _AdditionalPropertiesItem_OneofUnmarshaler, _AdditionalPropertiesItem_OneofSizer, []interface{}{ + (*AdditionalPropertiesItem_Schema)(nil), + (*AdditionalPropertiesItem_Boolean)(nil), + } +} + +func _AdditionalPropertiesItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AdditionalPropertiesItem) + // oneof + switch x := m.Oneof.(type) { + case *AdditionalPropertiesItem_Schema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Schema); err != nil { + return err + } + case *AdditionalPropertiesItem_Boolean: + t := uint64(0) + if x.Boolean { + t = 1 + } + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("AdditionalPropertiesItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _AdditionalPropertiesItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AdditionalPropertiesItem) + switch tag { + case 1: // oneof.schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Schema) + err := b.DecodeMessage(msg) + m.Oneof = &AdditionalPropertiesItem_Schema{msg} + return true, err + case 2: // oneof.boolean + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Oneof = &AdditionalPropertiesItem_Boolean{x != 0} + return true, err + default: + return false, nil + } +} + +func _AdditionalPropertiesItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AdditionalPropertiesItem) + // oneof + switch x := m.Oneof.(type) { + case *AdditionalPropertiesItem_Schema: 
+ s := proto.Size(x.Schema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AdditionalPropertiesItem_Boolean: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Any struct { + Value *google_protobuf.Any `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` + Yaml string `protobuf:"bytes,2,opt,name=yaml" json:"yaml,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Any) GetValue() *google_protobuf.Any { + if m != nil { + return m.Value + } + return nil +} + +func (m *Any) GetYaml() string { + if m != nil { + return m.Yaml + } + return "" +} + +type ApiKeySecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *ApiKeySecurity) Reset() { *m = ApiKeySecurity{} } +func (m *ApiKeySecurity) String() string { return proto.CompactTextString(m) } +func (*ApiKeySecurity) ProtoMessage() {} +func (*ApiKeySecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ApiKeySecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ApiKeySecurity) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ApiKeySecurity) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *ApiKeySecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ApiKeySecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type BasicAuthenticationSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *BasicAuthenticationSecurity) Reset() { *m = BasicAuthenticationSecurity{} } +func (m *BasicAuthenticationSecurity) String() string { return proto.CompactTextString(m) } +func (*BasicAuthenticationSecurity) ProtoMessage() {} +func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *BasicAuthenticationSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *BasicAuthenticationSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type BodyParameter struct { + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,4,opt,name=required" json:"required,omitempty"` + Schema *Schema `protobuf:"bytes,5,opt,name=schema" json:"schema,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *BodyParameter) Reset() { *m = BodyParameter{} } +func (m *BodyParameter) String() string { return proto.CompactTextString(m) } +func (*BodyParameter) ProtoMessage() {} +func (*BodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *BodyParameter) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *BodyParameter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BodyParameter) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *BodyParameter) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *BodyParameter) GetSchema() *Schema { + if m != nil { + return m.Schema + } + return nil +} + +func (m *BodyParameter) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Contact information for the owners of the API. +type Contact struct { + // The identifying name of the contact person/organization. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The URL pointing to the contact information. + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + // The email address of the contact person/organization. + Email string `protobuf:"bytes,3,opt,name=email" json:"email,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Contact) Reset() { *m = Contact{} } +func (m *Contact) String() string { return proto.CompactTextString(m) } +func (*Contact) ProtoMessage() {} +func (*Contact) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *Contact) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Contact) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *Contact) GetEmail() string { + if m != nil { + return m.Email + } + return "" +} + +func (m *Contact) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Default struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Default) Reset() { *m = Default{} } +func (m *Default) String() string { return proto.CompactTextString(m) } +func (*Default) ProtoMessage() {} +func (*Default) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *Default) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. 
+type Definitions struct { + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Definitions) Reset() { *m = Definitions{} } +func (m *Definitions) String() string { return proto.CompactTextString(m) } +func (*Definitions) ProtoMessage() {} +func (*Definitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *Definitions) GetAdditionalProperties() []*NamedSchema { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Document struct { + // The Swagger version of this document. + Swagger string `protobuf:"bytes,1,opt,name=swagger" json:"swagger,omitempty"` + Info *Info `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` + // The host (name or ip) of the API. Example: 'swagger.io' + Host string `protobuf:"bytes,3,opt,name=host" json:"host,omitempty"` + // The base path to the API. Example: '/api'. + BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath" json:"base_path,omitempty"` + // The transfer protocol of the API. + Schemes []string `protobuf:"bytes,5,rep,name=schemes" json:"schemes,omitempty"` + // A list of MIME types accepted by the API. + Consumes []string `protobuf:"bytes,6,rep,name=consumes" json:"consumes,omitempty"` + // A list of MIME types the API can produce. + Produces []string `protobuf:"bytes,7,rep,name=produces" json:"produces,omitempty"` + Paths *Paths `protobuf:"bytes,8,opt,name=paths" json:"paths,omitempty"` + Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions" json:"definitions,omitempty"` + Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters" json:"parameters,omitempty"` + Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses" json:"responses,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` + SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions" json:"security_definitions,omitempty"` + Tags []*Tag `protobuf:"bytes,14,rep,name=tags" json:"tags,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} +func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *Document) GetSwagger() string { + if m != nil { + return m.Swagger + } + return "" +} + +func (m *Document) GetInfo() *Info { + if m != nil { + return m.Info + } + return nil +} + +func (m *Document) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *Document) GetBasePath() string { + if m != nil { + return m.BasePath + } + return "" +} + +func (m *Document) GetSchemes() []string { + if m != nil { + return m.Schemes + } + return nil +} + +func (m *Document) GetConsumes() []string { + if m != nil { + return m.Consumes + } + return nil +} + +func (m *Document) GetProduces() []string { + if m != nil { + return m.Produces + } + return nil +} + +func (m *Document) GetPaths() *Paths { + if m != nil { + return m.Paths + } + return nil +} + +func (m *Document) GetDefinitions() *Definitions { + if m != nil { + return m.Definitions + } 
+ return nil +} + +func (m *Document) GetParameters() *ParameterDefinitions { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *Document) GetResponses() *ResponseDefinitions { + if m != nil { + return m.Responses + } + return nil +} + +func (m *Document) GetSecurity() []*SecurityRequirement { + if m != nil { + return m.Security + } + return nil +} + +func (m *Document) GetSecurityDefinitions() *SecurityDefinitions { + if m != nil { + return m.SecurityDefinitions + } + return nil +} + +func (m *Document) GetTags() []*Tag { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Document) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Document) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Examples struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Examples) Reset() { *m = Examples{} } +func (m *Examples) String() string { return proto.CompactTextString(m) } +func (*Examples) ProtoMessage() {} +func (*Examples) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *Examples) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// information about external documentation +type ExternalDocs struct { + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *ExternalDocs) Reset() { *m = ExternalDocs{} } +func (m *ExternalDocs) String() string { return proto.CompactTextString(m) } +func (*ExternalDocs) ProtoMessage() {} +func (*ExternalDocs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *ExternalDocs) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ExternalDocs) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *ExternalDocs) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. 
+type FileSchema struct { + Format string `protobuf:"bytes,1,opt,name=format" json:"format,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + Default *Any `protobuf:"bytes,4,opt,name=default" json:"default,omitempty"` + Required []string `protobuf:"bytes,5,rep,name=required" json:"required,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,9,opt,name=example" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *FileSchema) Reset() { *m = FileSchema{} } +func (m *FileSchema) String() string { return proto.CompactTextString(m) } +func (*FileSchema) ProtoMessage() {} +func (*FileSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *FileSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *FileSchema) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *FileSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *FileSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *FileSchema) GetRequired() []string { + if m != nil { + return m.Required + } + return nil +} + +func (m *FileSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *FileSchema) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + +func (m *FileSchema) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *FileSchema) GetExample() *Any { + if m != nil { + return m.Example + } + return nil +} + +func (m *FileSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type FormDataParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // allows sending a parameter by name only or with an empty value. 
+ AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *FormDataParameterSubSchema) Reset() { *m = FormDataParameterSubSchema{} } +func (m *FormDataParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*FormDataParameterSubSchema) ProtoMessage() {} +func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *FormDataParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *FormDataParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *FormDataParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *FormDataParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *FormDataParameterSubSchema) GetAllowEmptyValue() bool { + if m != nil { + return m.AllowEmptyValue + } + return false +} + +func (m *FormDataParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *FormDataParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *FormDataParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *FormDataParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *FormDataParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *FormDataParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 
+} + +func (m *FormDataParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *FormDataParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *FormDataParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *FormDataParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *FormDataParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *FormDataParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Header struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + Description string `protobuf:"bytes,18,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() 
([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *Header) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Header) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *Header) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *Header) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *Header) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *Header) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *Header) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *Header) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *Header) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *Header) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *Header) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *Header) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *Header) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *Header) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Header) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *Header) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *Header) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *Header) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Header) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type HeaderParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *HeaderParameterSubSchema) Reset() { *m = HeaderParameterSubSchema{} } +func (m *HeaderParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*HeaderParameterSubSchema) ProtoMessage() {} +func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *HeaderParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *HeaderParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *HeaderParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *HeaderParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *HeaderParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *HeaderParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *HeaderParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *HeaderParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *HeaderParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *HeaderParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *HeaderParameterSubSchema) GetMinimum() float64 { + if m != nil { + 
return m.Minimum + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *HeaderParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *HeaderParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *HeaderParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *HeaderParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Headers struct { + AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Headers) Reset() { *m = Headers{} } +func (m *Headers) String() string { return proto.CompactTextString(m) } +func (*Headers) ProtoMessage() {} +func (*Headers) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *Headers) GetAdditionalProperties() []*NamedHeader { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// General information about the API. +type Info struct { + // A unique and precise title of the API. + Title string `protobuf:"bytes,1,opt,name=title" json:"title,omitempty"` + // A semantic version number of the API. + Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The terms of service for the API. 
+ TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService" json:"terms_of_service,omitempty"` + Contact *Contact `protobuf:"bytes,5,opt,name=contact" json:"contact,omitempty"` + License *License `protobuf:"bytes,6,opt,name=license" json:"license,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Info) Reset() { *m = Info{} } +func (m *Info) String() string { return proto.CompactTextString(m) } +func (*Info) ProtoMessage() {} +func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *Info) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Info) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Info) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Info) GetTermsOfService() string { + if m != nil { + return m.TermsOfService + } + return "" +} + +func (m *Info) GetContact() *Contact { + if m != nil { + return m.Contact + } + return nil +} + +func (m *Info) GetLicense() *License { + if m != nil { + return m.License + } + return nil +} + +func (m *Info) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type ItemsItem struct { + Schema []*Schema `protobuf:"bytes,1,rep,name=schema" json:"schema,omitempty"` +} + +func (m *ItemsItem) Reset() { *m = ItemsItem{} } +func (m *ItemsItem) String() string { return proto.CompactTextString(m) } +func (*ItemsItem) ProtoMessage() {} +func (*ItemsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *ItemsItem) GetSchema() []*Schema { + if m != nil { + return m.Schema + } + return nil +} + +type JsonReference struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` +} + +func (m *JsonReference) Reset() { *m = JsonReference{} } +func (m *JsonReference) String() string { return proto.CompactTextString(m) } +func (*JsonReference) ProtoMessage() {} +func (*JsonReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *JsonReference) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *JsonReference) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +type License struct { + // The name of the license type. It's encouraged to use an OSI compatible license. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The URL pointing to the license. 
+ Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *License) Reset() { *m = License{} } +func (m *License) String() string { return proto.CompactTextString(m) } +func (*License) ProtoMessage() {} +func (*License) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *License) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *License) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *License) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +type NamedAny struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedAny) Reset() { *m = NamedAny{} } +func (m *NamedAny) String() string { return proto.CompactTextString(m) } +func (*NamedAny) ProtoMessage() {} +func (*NamedAny) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *NamedAny) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedAny) GetValue() *Any { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +type NamedHeader struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Header `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedHeader) Reset() { *m = NamedHeader{} } +func (m *NamedHeader) String() string { return proto.CompactTextString(m) } +func (*NamedHeader) ProtoMessage() {} +func (*NamedHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *NamedHeader) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedHeader) GetValue() *Header { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +type NamedParameter struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Parameter `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedParameter) Reset() { *m = NamedParameter{} } +func (m *NamedParameter) String() string { return proto.CompactTextString(m) } +func (*NamedParameter) ProtoMessage() {} +func (*NamedParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *NamedParameter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedParameter) GetValue() *Parameter { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
+type NamedPathItem struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *PathItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedPathItem) Reset() { *m = NamedPathItem{} } +func (m *NamedPathItem) String() string { return proto.CompactTextString(m) } +func (*NamedPathItem) ProtoMessage() {} +func (*NamedPathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *NamedPathItem) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedPathItem) GetValue() *PathItem { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +type NamedResponse struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Response `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedResponse) Reset() { *m = NamedResponse{} } +func (m *NamedResponse) String() string { return proto.CompactTextString(m) } +func (*NamedResponse) ProtoMessage() {} +func (*NamedResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *NamedResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedResponse) GetValue() *Response { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. +type NamedResponseValue struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *ResponseValue `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedResponseValue) Reset() { *m = NamedResponseValue{} } +func (m *NamedResponseValue) String() string { return proto.CompactTextString(m) } +func (*NamedResponseValue) ProtoMessage() {} +func (*NamedResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *NamedResponseValue) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedResponseValue) GetValue() *ResponseValue { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +type NamedSchema struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Schema `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedSchema) Reset() { *m = NamedSchema{} } +func (m *NamedSchema) String() string { return proto.CompactTextString(m) } +func (*NamedSchema) ProtoMessage() {} +func (*NamedSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *NamedSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedSchema) GetValue() *Schema { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. 
+type NamedSecurityDefinitionsItem struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedSecurityDefinitionsItem) Reset() { *m = NamedSecurityDefinitionsItem{} } +func (m *NamedSecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } +func (*NamedSecurityDefinitionsItem) ProtoMessage() {} +func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *NamedSecurityDefinitionsItem) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +type NamedString struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedString) Reset() { *m = NamedString{} } +func (m *NamedString) String() string { return proto.CompactTextString(m) } +func (*NamedString) ProtoMessage() {} +func (*NamedString) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *NamedString) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedString) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +type NamedStringArray struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *StringArray `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedStringArray) Reset() { *m = NamedStringArray{} } +func (m *NamedStringArray) String() string { return proto.CompactTextString(m) } +func (*NamedStringArray) ProtoMessage() {} +func (*NamedStringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *NamedStringArray) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedStringArray) GetValue() *StringArray { + if m != nil { + return m.Value + } + return nil +} + +type NonBodyParameter struct { + // Types that are valid to be assigned to Oneof: + // *NonBodyParameter_HeaderParameterSubSchema + // *NonBodyParameter_FormDataParameterSubSchema + // *NonBodyParameter_QueryParameterSubSchema + // *NonBodyParameter_PathParameterSubSchema + Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` +} + +func (m *NonBodyParameter) Reset() { *m = NonBodyParameter{} } +func (m *NonBodyParameter) String() string { return proto.CompactTextString(m) } +func (*NonBodyParameter) ProtoMessage() {} +func (*NonBodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +type isNonBodyParameter_Oneof interface { + isNonBodyParameter_Oneof() +} + +type NonBodyParameter_HeaderParameterSubSchema struct { + HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,oneof"` +} +type NonBodyParameter_FormDataParameterSubSchema struct { + FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,oneof"` +} +type 
NonBodyParameter_QueryParameterSubSchema struct { + QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,oneof"` +} +type NonBodyParameter_PathParameterSubSchema struct { + PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,oneof"` +} + +func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { + return x.HeaderParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { + return x.FormDataParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { + return x.QueryParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { + return x.PathParameterSubSchema + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*NonBodyParameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _NonBodyParameter_OneofMarshaler, _NonBodyParameter_OneofUnmarshaler, _NonBodyParameter_OneofSizer, []interface{}{ + (*NonBodyParameter_HeaderParameterSubSchema)(nil), + (*NonBodyParameter_FormDataParameterSubSchema)(nil), + (*NonBodyParameter_QueryParameterSubSchema)(nil), + (*NonBodyParameter_PathParameterSubSchema)(nil), + } +} + +func _NonBodyParameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*NonBodyParameter) + // oneof + switch x := m.Oneof.(type) { + case *NonBodyParameter_HeaderParameterSubSchema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HeaderParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_FormDataParameterSubSchema: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FormDataParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_QueryParameterSubSchema: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.QueryParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_PathParameterSubSchema: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PathParameterSubSchema); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("NonBodyParameter.Oneof has unexpected type %T", x) + } + return nil +} + +func _NonBodyParameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*NonBodyParameter) + switch tag { + case 1: // oneof.header_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(HeaderParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_HeaderParameterSubSchema{msg} + return true, err + case 2: // oneof.form_data_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FormDataParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_FormDataParameterSubSchema{msg} + return true, err + case 3: // oneof.query_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(QueryParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_QueryParameterSubSchema{msg} + return true, err + case 4: // oneof.path_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PathParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_PathParameterSubSchema{msg} + return true, err + default: + return false, nil + } +} + +func _NonBodyParameter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*NonBodyParameter) + // oneof + switch x := m.Oneof.(type) { + case *NonBodyParameter_HeaderParameterSubSchema: + s := proto.Size(x.HeaderParameterSubSchema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *NonBodyParameter_FormDataParameterSubSchema: + s := proto.Size(x.FormDataParameterSubSchema) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *NonBodyParameter_QueryParameterSubSchema: + s := proto.Size(x.QueryParameterSubSchema) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*NonBodyParameter_PathParameterSubSchema: + s := proto.Size(x.PathParameterSubSchema) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Oauth2AccessCodeSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` + TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,6,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2AccessCodeSecurity) Reset() { *m = Oauth2AccessCodeSecurity{} } +func (m *Oauth2AccessCodeSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2AccessCodeSecurity) ProtoMessage() {} +func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +func (m *Oauth2AccessCodeSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { + if m != nil { + return m.AuthorizationUrl + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2ApplicationSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2ApplicationSecurity) Reset() { *m = Oauth2ApplicationSecurity{} } +func (m *Oauth2ApplicationSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2ApplicationSecurity) ProtoMessage() {} +func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +func (m *Oauth2ApplicationSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2ApplicationSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} 
+ +func (m *Oauth2ApplicationSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2ImplicitSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2ImplicitSecurity) Reset() { *m = Oauth2ImplicitSecurity{} } +func (m *Oauth2ImplicitSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2ImplicitSecurity) ProtoMessage() {} +func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func (m *Oauth2ImplicitSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { + if m != nil { + return m.AuthorizationUrl + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2PasswordSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2PasswordSecurity) Reset() { *m = Oauth2PasswordSecurity{} } +func (m *Oauth2PasswordSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2PasswordSecurity) ProtoMessage() {} +func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +func (m *Oauth2PasswordSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2PasswordSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2Scopes struct { + 
AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Oauth2Scopes) Reset() { *m = Oauth2Scopes{} } +func (m *Oauth2Scopes) String() string { return proto.CompactTextString(m) } +func (*Oauth2Scopes) ProtoMessage() {} +func (*Oauth2Scopes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } + +func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Operation struct { + Tags []string `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"` + // A brief summary of the operation. + Summary string `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` + // A longer description of the operation, GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + // A unique identifier of the operation. + OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` + // A list of MIME types the API can produce. + Produces []string `protobuf:"bytes,6,rep,name=produces" json:"produces,omitempty"` + // A list of MIME types the API can consume. + Consumes []string `protobuf:"bytes,7,rep,name=consumes" json:"consumes,omitempty"` + // The parameters needed to send a valid API call. + Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters" json:"parameters,omitempty"` + Responses *Responses `protobuf:"bytes,9,opt,name=responses" json:"responses,omitempty"` + // The transfer protocol of the API. + Schemes []string `protobuf:"bytes,10,rep,name=schemes" json:"schemes,omitempty"` + Deprecated bool `protobuf:"varint,11,opt,name=deprecated" json:"deprecated,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } + +func (m *Operation) GetTags() []string { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Operation) GetSummary() string { + if m != nil { + return m.Summary + } + return "" +} + +func (m *Operation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Operation) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Operation) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +func (m *Operation) GetProduces() []string { + if m != nil { + return m.Produces + } + return nil +} + +func (m *Operation) GetConsumes() []string { + if m != nil { + return m.Consumes + } + return nil +} + +func (m *Operation) GetParameters() []*ParametersItem { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *Operation) GetResponses() *Responses { + if m != nil { + return m.Responses + } + return nil +} + +func (m *Operation) GetSchemes() []string { + if m != nil { + return m.Schemes + } + return nil +} + +func (m *Operation) GetDeprecated() bool { + if m != nil { + 
return m.Deprecated + } + return false +} + +func (m *Operation) GetSecurity() []*SecurityRequirement { + if m != nil { + return m.Security + } + return nil +} + +func (m *Operation) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Parameter struct { + // Types that are valid to be assigned to Oneof: + // *Parameter_BodyParameter + // *Parameter_NonBodyParameter + Oneof isParameter_Oneof `protobuf_oneof:"oneof"` +} + +func (m *Parameter) Reset() { *m = Parameter{} } +func (m *Parameter) String() string { return proto.CompactTextString(m) } +func (*Parameter) ProtoMessage() {} +func (*Parameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } + +type isParameter_Oneof interface { + isParameter_Oneof() +} + +type Parameter_BodyParameter struct { + BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,oneof"` +} +type Parameter_NonBodyParameter struct { + NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,oneof"` +} + +func (*Parameter_BodyParameter) isParameter_Oneof() {} +func (*Parameter_NonBodyParameter) isParameter_Oneof() {} + +func (m *Parameter) GetOneof() isParameter_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *Parameter) GetBodyParameter() *BodyParameter { + if x, ok := m.GetOneof().(*Parameter_BodyParameter); ok { + return x.BodyParameter + } + return nil +} + +func (m *Parameter) GetNonBodyParameter() *NonBodyParameter { + if x, ok := m.GetOneof().(*Parameter_NonBodyParameter); ok { + return x.NonBodyParameter + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Parameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Parameter_OneofMarshaler, _Parameter_OneofUnmarshaler, _Parameter_OneofSizer, []interface{}{ + (*Parameter_BodyParameter)(nil), + (*Parameter_NonBodyParameter)(nil), + } +} + +func _Parameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Parameter) + // oneof + switch x := m.Oneof.(type) { + case *Parameter_BodyParameter: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BodyParameter); err != nil { + return err + } + case *Parameter_NonBodyParameter: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.NonBodyParameter); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Parameter.Oneof has unexpected type %T", x) + } + return nil +} + +func _Parameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Parameter) + switch tag { + case 1: // oneof.body_parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BodyParameter) + err := b.DecodeMessage(msg) + m.Oneof = &Parameter_BodyParameter{msg} + return true, err + case 2: // oneof.non_body_parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NonBodyParameter) + err := b.DecodeMessage(msg) + m.Oneof = &Parameter_NonBodyParameter{msg} + return true, err + default: + return false, nil + } +} + +func _Parameter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Parameter) + // oneof + switch x := m.Oneof.(type) { + case *Parameter_BodyParameter: + s := proto.Size(x.BodyParameter) + n += proto.SizeVarint(1<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Parameter_NonBodyParameter: + s := proto.Size(x.NonBodyParameter) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// One or more JSON representations for parameters +type ParameterDefinitions struct { + AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *ParameterDefinitions) Reset() { *m = ParameterDefinitions{} } +func (m *ParameterDefinitions) String() string { return proto.CompactTextString(m) } +func (*ParameterDefinitions) ProtoMessage() {} +func (*ParameterDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } + +func (m *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type ParametersItem struct { + // Types that are valid to be assigned to Oneof: + // *ParametersItem_Parameter + // *ParametersItem_JsonReference + Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *ParametersItem) Reset() { *m = ParametersItem{} } +func (m *ParametersItem) String() string { return proto.CompactTextString(m) } +func (*ParametersItem) ProtoMessage() {} +func (*ParametersItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } + +type isParametersItem_Oneof interface { + isParametersItem_Oneof() +} + +type ParametersItem_Parameter struct { + Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,oneof"` +} +type ParametersItem_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` +} + +func (*ParametersItem_Parameter) isParametersItem_Oneof() {} +func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} + +func (m *ParametersItem) GetOneof() isParametersItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *ParametersItem) GetParameter() *Parameter { + if x, ok := m.GetOneof().(*ParametersItem_Parameter); ok { + return x.Parameter + } + return nil +} + +func (m *ParametersItem) GetJsonReference() *JsonReference { + if x, ok := m.GetOneof().(*ParametersItem_JsonReference); ok { + return x.JsonReference + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ParametersItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ParametersItem_OneofMarshaler, _ParametersItem_OneofUnmarshaler, _ParametersItem_OneofSizer, []interface{}{ + (*ParametersItem_Parameter)(nil), + (*ParametersItem_JsonReference)(nil), + } +} + +func _ParametersItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ParametersItem) + // oneof + switch x := m.Oneof.(type) { + case *ParametersItem_Parameter: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Parameter); err != nil { + return err + } + case *ParametersItem_JsonReference: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JsonReference); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ParametersItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _ParametersItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ParametersItem) + switch tag { + case 1: // oneof.parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Parameter) + err := b.DecodeMessage(msg) + m.Oneof = &ParametersItem_Parameter{msg} + return true, err + case 2: // oneof.json_reference + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JsonReference) + err := b.DecodeMessage(msg) + m.Oneof = &ParametersItem_JsonReference{msg} + return true, err + default: + return false, nil + } +} + +func _ParametersItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ParametersItem) + // oneof + switch x := m.Oneof.(type) { + case *ParametersItem_Parameter: + s := proto.Size(x.Parameter) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ParametersItem_JsonReference: + s := proto.Size(x.JsonReference) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type PathItem struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` + Get *Operation `protobuf:"bytes,2,opt,name=get" json:"get,omitempty"` + Put *Operation `protobuf:"bytes,3,opt,name=put" json:"put,omitempty"` + Post *Operation `protobuf:"bytes,4,opt,name=post" json:"post,omitempty"` + Delete *Operation `protobuf:"bytes,5,opt,name=delete" json:"delete,omitempty"` + Options *Operation `protobuf:"bytes,6,opt,name=options" json:"options,omitempty"` + Head *Operation `protobuf:"bytes,7,opt,name=head" json:"head,omitempty"` + Patch *Operation `protobuf:"bytes,8,opt,name=patch" json:"patch,omitempty"` + // The parameters needed to send a valid API call. 
+ Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters" json:"parameters,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PathItem) Reset() { *m = PathItem{} } +func (m *PathItem) String() string { return proto.CompactTextString(m) } +func (*PathItem) ProtoMessage() {} +func (*PathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } + +func (m *PathItem) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *PathItem) GetGet() *Operation { + if m != nil { + return m.Get + } + return nil +} + +func (m *PathItem) GetPut() *Operation { + if m != nil { + return m.Put + } + return nil +} + +func (m *PathItem) GetPost() *Operation { + if m != nil { + return m.Post + } + return nil +} + +func (m *PathItem) GetDelete() *Operation { + if m != nil { + return m.Delete + } + return nil +} + +func (m *PathItem) GetOptions() *Operation { + if m != nil { + return m.Options + } + return nil +} + +func (m *PathItem) GetHead() *Operation { + if m != nil { + return m.Head + } + return nil +} + +func (m *PathItem) GetPatch() *Operation { + if m != nil { + return m.Patch + } + return nil +} + +func (m *PathItem) GetParameters() []*ParametersItem { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *PathItem) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type PathParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PathParameterSubSchema) Reset() { *m = PathParameterSubSchema{} } +func (m *PathParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*PathParameterSubSchema) ProtoMessage() {} +func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } + +func (m *PathParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *PathParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *PathParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PathParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PathParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PathParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *PathParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *PathParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *PathParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *PathParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *PathParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *PathParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + 
+func (m *PathParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *PathParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *PathParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *PathParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *PathParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *PathParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *PathParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *PathParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *PathParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *PathParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. +type Paths struct { + VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path" json:"path,omitempty"` +} + +func (m *Paths) Reset() { *m = Paths{} } +func (m *Paths) String() string { return proto.CompactTextString(m) } +func (*Paths) ProtoMessage() {} +func (*Paths) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } + +func (m *Paths) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +func (m *Paths) GetPath() []*NamedPathItem { + if m != nil { + return m.Path + } + return nil +} + +type PrimitivesItems struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 
`protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PrimitivesItems) Reset() { *m = PrimitivesItems{} } +func (m *PrimitivesItems) String() string { return proto.CompactTextString(m) } +func (*PrimitivesItems) ProtoMessage() {} +func (*PrimitivesItems) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } + +func (m *PrimitivesItems) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PrimitivesItems) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *PrimitivesItems) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *PrimitivesItems) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *PrimitivesItems) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *PrimitivesItems) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *PrimitivesItems) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *PrimitivesItems) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *PrimitivesItems) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *PrimitivesItems) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *PrimitivesItems) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *PrimitivesItems) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *PrimitivesItems) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *PrimitivesItems) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *PrimitivesItems) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *PrimitivesItems) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *PrimitivesItems) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *PrimitivesItems) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Properties struct { + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Properties) Reset() { *m = Properties{} } +func (m *Properties) String() string { return proto.CompactTextString(m) } +func (*Properties) ProtoMessage() {} +func (*Properties) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } + +func (m *Properties) GetAdditionalProperties() []*NamedSchema { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type QueryParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. 
+ Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // allows sending a parameter by name only or with an empty value. + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *QueryParameterSubSchema) Reset() { *m = QueryParameterSubSchema{} } +func (m *QueryParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*QueryParameterSubSchema) ProtoMessage() {} +func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } + +func (m *QueryParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *QueryParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *QueryParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *QueryParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *QueryParameterSubSchema) GetAllowEmptyValue() bool { + if m != nil { + return m.AllowEmptyValue + } + return false +} + +func (m *QueryParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *QueryParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *QueryParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *QueryParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + 
+func (m *QueryParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *QueryParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *QueryParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *QueryParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *QueryParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *QueryParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *QueryParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *QueryParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *QueryParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *QueryParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *QueryParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *QueryParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *QueryParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Response struct { + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema" json:"schema,omitempty"` + Headers *Headers `protobuf:"bytes,3,opt,name=headers" json:"headers,omitempty"` + Examples *Examples `protobuf:"bytes,4,opt,name=examples" json:"examples,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } + +func (m *Response) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Response) GetSchema() *SchemaItem { + if m != nil { + return m.Schema + } + return nil +} + +func (m *Response) GetHeaders() *Headers { + if m != nil { + return m.Headers + } + return nil +} + +func (m *Response) GetExamples() *Examples { + if m != nil { + return m.Examples + } + return nil +} + +func (m *Response) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// One or more JSON representations for parameters +type ResponseDefinitions struct { + AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *ResponseDefinitions) Reset() { *m = ResponseDefinitions{} } +func (m *ResponseDefinitions) String() string { return proto.CompactTextString(m) } +func (*ResponseDefinitions) ProtoMessage() {} +func (*ResponseDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } + +func (m *ResponseDefinitions) GetAdditionalProperties() 
[]*NamedResponse { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type ResponseValue struct { + // Types that are valid to be assigned to Oneof: + // *ResponseValue_Response + // *ResponseValue_JsonReference + Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` +} + +func (m *ResponseValue) Reset() { *m = ResponseValue{} } +func (m *ResponseValue) String() string { return proto.CompactTextString(m) } +func (*ResponseValue) ProtoMessage() {} +func (*ResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } + +type isResponseValue_Oneof interface { + isResponseValue_Oneof() +} + +type ResponseValue_Response struct { + Response *Response `protobuf:"bytes,1,opt,name=response,oneof"` +} +type ResponseValue_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` +} + +func (*ResponseValue_Response) isResponseValue_Oneof() {} +func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} + +func (m *ResponseValue) GetOneof() isResponseValue_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *ResponseValue) GetResponse() *Response { + if x, ok := m.GetOneof().(*ResponseValue_Response); ok { + return x.Response + } + return nil +} + +func (m *ResponseValue) GetJsonReference() *JsonReference { + if x, ok := m.GetOneof().(*ResponseValue_JsonReference); ok { + return x.JsonReference + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ResponseValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ResponseValue_OneofMarshaler, _ResponseValue_OneofUnmarshaler, _ResponseValue_OneofSizer, []interface{}{ + (*ResponseValue_Response)(nil), + (*ResponseValue_JsonReference)(nil), + } +} + +func _ResponseValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ResponseValue) + // oneof + switch x := m.Oneof.(type) { + case *ResponseValue_Response: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Response); err != nil { + return err + } + case *ResponseValue_JsonReference: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JsonReference); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ResponseValue.Oneof has unexpected type %T", x) + } + return nil +} + +func _ResponseValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ResponseValue) + switch tag { + case 1: // oneof.response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Response) + err := b.DecodeMessage(msg) + m.Oneof = &ResponseValue_Response{msg} + return true, err + case 2: // oneof.json_reference + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JsonReference) + err := b.DecodeMessage(msg) + m.Oneof = &ResponseValue_JsonReference{msg} + return true, err + default: + return false, nil + } +} + +func _ResponseValue_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ResponseValue) + // oneof + switch x := m.Oneof.(type) { + case *ResponseValue_Response: + s := proto.Size(x.Response) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ResponseValue_JsonReference: + s := proto.Size(x.JsonReference) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Response objects names can either be any valid HTTP status code or 'default'. +type Responses struct { + ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode" json:"response_code,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Responses) Reset() { *m = Responses{} } +func (m *Responses) String() string { return proto.CompactTextString(m) } +func (*Responses) ProtoMessage() {} +func (*Responses) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } + +func (m *Responses) GetResponseCode() []*NamedResponseValue { + if m != nil { + return m.ResponseCode + } + return nil +} + +func (m *Responses) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. +type Schema struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Title string `protobuf:"bytes,3,opt,name=title" json:"title,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + Maximum float64 `protobuf:"fixed64,7,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,9,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,13,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties" json:"max_properties,omitempty"` + MinProperties int64 `protobuf:"varint,18,opt,name=min_properties,json=minProperties" json:"min_properties,omitempty"` + Required []string `protobuf:"bytes,19,rep,name=required" json:"required,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + Type *TypeItem `protobuf:"bytes,22,opt,name=type" json:"type,omitempty"` + Items *ItemsItem `protobuf:"bytes,23,opt,name=items" json:"items,omitempty"` + AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf" json:"all_of,omitempty"` + Properties *Properties `protobuf:"bytes,25,opt,name=properties" json:"properties,omitempty"` + Discriminator string 
`protobuf:"bytes,26,opt,name=discriminator" json:"discriminator,omitempty"` + ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` + Xml *Xml `protobuf:"bytes,28,opt,name=xml" json:"xml,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,30,opt,name=example" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Schema) Reset() { *m = Schema{} } +func (m *Schema) String() string { return proto.CompactTextString(m) } +func (*Schema) ProtoMessage() {} +func (*Schema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } + +func (m *Schema) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *Schema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *Schema) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Schema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Schema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *Schema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *Schema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *Schema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *Schema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *Schema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *Schema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *Schema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *Schema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *Schema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *Schema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Schema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *Schema) GetMaxProperties() int64 { + if m != nil { + return m.MaxProperties + } + return 0 +} + +func (m *Schema) GetMinProperties() int64 { + if m != nil { + return m.MinProperties + } + return 0 +} + +func (m *Schema) GetRequired() []string { + if m != nil { + return m.Required + } + return nil +} + +func (m *Schema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +func (m *Schema) GetType() *TypeItem { + if m != nil { + return m.Type + } + return nil +} + +func (m *Schema) GetItems() *ItemsItem { + if m != nil { + return m.Items + } + return nil +} + +func (m *Schema) GetAllOf() []*Schema { + if m != nil { + return m.AllOf + } + return nil +} + +func (m *Schema) GetProperties() *Properties { + if m != nil { + return m.Properties + } + return nil +} + +func (m *Schema) GetDiscriminator() string { + if m != nil { + return m.Discriminator + } + return "" +} + +func (m *Schema) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + 
+func (m *Schema) GetXml() *Xml { + if m != nil { + return m.Xml + } + return nil +} + +func (m *Schema) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Schema) GetExample() *Any { + if m != nil { + return m.Example + } + return nil +} + +func (m *Schema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type SchemaItem struct { + // Types that are valid to be assigned to Oneof: + // *SchemaItem_Schema + // *SchemaItem_FileSchema + Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *SchemaItem) Reset() { *m = SchemaItem{} } +func (m *SchemaItem) String() string { return proto.CompactTextString(m) } +func (*SchemaItem) ProtoMessage() {} +func (*SchemaItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } + +type isSchemaItem_Oneof interface { + isSchemaItem_Oneof() +} + +type SchemaItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` +} +type SchemaItem_FileSchema struct { + FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,oneof"` +} + +func (*SchemaItem_Schema) isSchemaItem_Oneof() {} +func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} + +func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *SchemaItem) GetSchema() *Schema { + if x, ok := m.GetOneof().(*SchemaItem_Schema); ok { + return x.Schema + } + return nil +} + +func (m *SchemaItem) GetFileSchema() *FileSchema { + if x, ok := m.GetOneof().(*SchemaItem_FileSchema); ok { + return x.FileSchema + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SchemaItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SchemaItem_OneofMarshaler, _SchemaItem_OneofUnmarshaler, _SchemaItem_OneofSizer, []interface{}{ + (*SchemaItem_Schema)(nil), + (*SchemaItem_FileSchema)(nil), + } +} + +func _SchemaItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SchemaItem) + // oneof + switch x := m.Oneof.(type) { + case *SchemaItem_Schema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Schema); err != nil { + return err + } + case *SchemaItem_FileSchema: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FileSchema); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SchemaItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _SchemaItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SchemaItem) + switch tag { + case 1: // oneof.schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Schema) + err := b.DecodeMessage(msg) + m.Oneof = &SchemaItem_Schema{msg} + return true, err + case 2: // oneof.file_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FileSchema) + err := b.DecodeMessage(msg) + m.Oneof = &SchemaItem_FileSchema{msg} + return true, err + default: + return false, nil + } +} + +func _SchemaItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SchemaItem) + // oneof + switch x := m.Oneof.(type) { + case *SchemaItem_Schema: + s := proto.Size(x.Schema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*SchemaItem_FileSchema: + s := proto.Size(x.FileSchema) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type SecurityDefinitions struct { + AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *SecurityDefinitions) Reset() { *m = SecurityDefinitions{} } +func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitions) ProtoMessage() {} +func (*SecurityDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } + +func (m *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type SecurityDefinitionsItem struct { + // Types that are valid to be assigned to Oneof: + // *SecurityDefinitionsItem_BasicAuthenticationSecurity + // *SecurityDefinitionsItem_ApiKeySecurity + // *SecurityDefinitionsItem_Oauth2ImplicitSecurity + // *SecurityDefinitionsItem_Oauth2PasswordSecurity + // *SecurityDefinitionsItem_Oauth2ApplicationSecurity + // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity + Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *SecurityDefinitionsItem) Reset() { *m = SecurityDefinitionsItem{} } +func (m *SecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitionsItem) ProtoMessage() {} +func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } + +type isSecurityDefinitionsItem_Oneof interface { + isSecurityDefinitionsItem_Oneof() +} + +type SecurityDefinitionsItem_BasicAuthenticationSecurity struct { + BasicAuthenticationSecurity *BasicAuthenticationSecurity `protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,oneof"` +} +type SecurityDefinitionsItem_ApiKeySecurity struct { + ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct { + Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2PasswordSecurity struct { + Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2ApplicationSecurity struct { + Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct { + Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,oneof"` +} + +func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} +func 
(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { + return x.BasicAuthenticationSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { + return x.ApiKeySecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { + return x.Oauth2ImplicitSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { + return x.Oauth2PasswordSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { + return x.Oauth2ApplicationSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok { + return x.Oauth2AccessCodeSecurity + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SecurityDefinitionsItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SecurityDefinitionsItem_OneofMarshaler, _SecurityDefinitionsItem_OneofUnmarshaler, _SecurityDefinitionsItem_OneofSizer, []interface{}{ + (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), + (*SecurityDefinitionsItem_ApiKeySecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), + } +} + +func _SecurityDefinitionsItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SecurityDefinitionsItem) + // oneof + switch x := m.Oneof.(type) { + case *SecurityDefinitionsItem_BasicAuthenticationSecurity: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BasicAuthenticationSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_ApiKeySecurity: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ApiKeySecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2ImplicitSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2PasswordSecurity: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2PasswordSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2ApplicationSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: + b.EncodeVarint(6<<3 | 
proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2AccessCodeSecurity); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SecurityDefinitionsItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _SecurityDefinitionsItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SecurityDefinitionsItem) + switch tag { + case 1: // oneof.basic_authentication_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BasicAuthenticationSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{msg} + return true, err + case 2: // oneof.api_key_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ApiKeySecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{msg} + return true, err + case 3: // oneof.oauth2_implicit_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2ImplicitSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{msg} + return true, err + case 4: // oneof.oauth2_password_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2PasswordSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{msg} + return true, err + case 5: // oneof.oauth2_application_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2ApplicationSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{msg} + return true, err + case 6: // oneof.oauth2_access_code_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2AccessCodeSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{msg} + return true, err + default: + return false, nil + } +} + +func _SecurityDefinitionsItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SecurityDefinitionsItem) + // oneof + switch x := m.Oneof.(type) { + case *SecurityDefinitionsItem_BasicAuthenticationSecurity: + s := proto.Size(x.BasicAuthenticationSecurity) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_ApiKeySecurity: + s := proto.Size(x.ApiKeySecurity) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: + s := proto.Size(x.Oauth2ImplicitSecurity) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2PasswordSecurity: + s := proto.Size(x.Oauth2PasswordSecurity) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: + s := proto.Size(x.Oauth2ApplicationSecurity) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: + s := proto.Size(x.Oauth2AccessCodeSecurity) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type SecurityRequirement struct { + 
AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *SecurityRequirement) Reset() { *m = SecurityRequirement{} } +func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) } +func (*SecurityRequirement) ProtoMessage() {} +func (*SecurityRequirement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } + +func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type StringArray struct { + Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` +} + +func (m *StringArray) Reset() { *m = StringArray{} } +func (m *StringArray) String() string { return proto.CompactTextString(m) } +func (*StringArray) ProtoMessage() {} +func (*StringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } + +func (m *StringArray) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +type Tag struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Tag) Reset() { *m = Tag{} } +func (m *Tag) String() string { return proto.CompactTextString(m) } +func (*Tag) ProtoMessage() {} +func (*Tag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } + +func (m *Tag) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Tag) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Tag) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Tag) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type TypeItem struct { + Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` +} + +func (m *TypeItem) Reset() { *m = TypeItem{} } +func (m *TypeItem) String() string { return proto.CompactTextString(m) } +func (*TypeItem) ProtoMessage() {} +func (*TypeItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } + +func (m *TypeItem) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +// Any property starting with x- is valid. 
+type VendorExtension struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *VendorExtension) Reset() { *m = VendorExtension{} } +func (m *VendorExtension) String() string { return proto.CompactTextString(m) } +func (*VendorExtension) ProtoMessage() {} +func (*VendorExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } + +func (m *VendorExtension) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Xml struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix" json:"prefix,omitempty"` + Attribute bool `protobuf:"varint,4,opt,name=attribute" json:"attribute,omitempty"` + Wrapped bool `protobuf:"varint,5,opt,name=wrapped" json:"wrapped,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Xml) Reset() { *m = Xml{} } +func (m *Xml) String() string { return proto.CompactTextString(m) } +func (*Xml) ProtoMessage() {} +func (*Xml) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } + +func (m *Xml) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Xml) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *Xml) GetPrefix() string { + if m != nil { + return m.Prefix + } + return "" +} + +func (m *Xml) GetAttribute() bool { + if m != nil { + return m.Attribute + } + return false +} + +func (m *Xml) GetWrapped() bool { + if m != nil { + return m.Wrapped + } + return false +} + +func (m *Xml) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +func init() { + proto.RegisterType((*AdditionalPropertiesItem)(nil), "openapi.v2.AdditionalPropertiesItem") + proto.RegisterType((*Any)(nil), "openapi.v2.Any") + proto.RegisterType((*ApiKeySecurity)(nil), "openapi.v2.ApiKeySecurity") + proto.RegisterType((*BasicAuthenticationSecurity)(nil), "openapi.v2.BasicAuthenticationSecurity") + proto.RegisterType((*BodyParameter)(nil), "openapi.v2.BodyParameter") + proto.RegisterType((*Contact)(nil), "openapi.v2.Contact") + proto.RegisterType((*Default)(nil), "openapi.v2.Default") + proto.RegisterType((*Definitions)(nil), "openapi.v2.Definitions") + proto.RegisterType((*Document)(nil), "openapi.v2.Document") + proto.RegisterType((*Examples)(nil), "openapi.v2.Examples") + proto.RegisterType((*ExternalDocs)(nil), "openapi.v2.ExternalDocs") + proto.RegisterType((*FileSchema)(nil), "openapi.v2.FileSchema") + proto.RegisterType((*FormDataParameterSubSchema)(nil), "openapi.v2.FormDataParameterSubSchema") + proto.RegisterType((*Header)(nil), "openapi.v2.Header") + proto.RegisterType((*HeaderParameterSubSchema)(nil), "openapi.v2.HeaderParameterSubSchema") + proto.RegisterType((*Headers)(nil), "openapi.v2.Headers") + proto.RegisterType((*Info)(nil), "openapi.v2.Info") + proto.RegisterType((*ItemsItem)(nil), "openapi.v2.ItemsItem") + proto.RegisterType((*JsonReference)(nil), "openapi.v2.JsonReference") + proto.RegisterType((*License)(nil), "openapi.v2.License") + proto.RegisterType((*NamedAny)(nil), "openapi.v2.NamedAny") + proto.RegisterType((*NamedHeader)(nil), "openapi.v2.NamedHeader") + proto.RegisterType((*NamedParameter)(nil), 
"openapi.v2.NamedParameter") + proto.RegisterType((*NamedPathItem)(nil), "openapi.v2.NamedPathItem") + proto.RegisterType((*NamedResponse)(nil), "openapi.v2.NamedResponse") + proto.RegisterType((*NamedResponseValue)(nil), "openapi.v2.NamedResponseValue") + proto.RegisterType((*NamedSchema)(nil), "openapi.v2.NamedSchema") + proto.RegisterType((*NamedSecurityDefinitionsItem)(nil), "openapi.v2.NamedSecurityDefinitionsItem") + proto.RegisterType((*NamedString)(nil), "openapi.v2.NamedString") + proto.RegisterType((*NamedStringArray)(nil), "openapi.v2.NamedStringArray") + proto.RegisterType((*NonBodyParameter)(nil), "openapi.v2.NonBodyParameter") + proto.RegisterType((*Oauth2AccessCodeSecurity)(nil), "openapi.v2.Oauth2AccessCodeSecurity") + proto.RegisterType((*Oauth2ApplicationSecurity)(nil), "openapi.v2.Oauth2ApplicationSecurity") + proto.RegisterType((*Oauth2ImplicitSecurity)(nil), "openapi.v2.Oauth2ImplicitSecurity") + proto.RegisterType((*Oauth2PasswordSecurity)(nil), "openapi.v2.Oauth2PasswordSecurity") + proto.RegisterType((*Oauth2Scopes)(nil), "openapi.v2.Oauth2Scopes") + proto.RegisterType((*Operation)(nil), "openapi.v2.Operation") + proto.RegisterType((*Parameter)(nil), "openapi.v2.Parameter") + proto.RegisterType((*ParameterDefinitions)(nil), "openapi.v2.ParameterDefinitions") + proto.RegisterType((*ParametersItem)(nil), "openapi.v2.ParametersItem") + proto.RegisterType((*PathItem)(nil), "openapi.v2.PathItem") + proto.RegisterType((*PathParameterSubSchema)(nil), "openapi.v2.PathParameterSubSchema") + proto.RegisterType((*Paths)(nil), "openapi.v2.Paths") + proto.RegisterType((*PrimitivesItems)(nil), "openapi.v2.PrimitivesItems") + proto.RegisterType((*Properties)(nil), "openapi.v2.Properties") + proto.RegisterType((*QueryParameterSubSchema)(nil), "openapi.v2.QueryParameterSubSchema") + proto.RegisterType((*Response)(nil), "openapi.v2.Response") + proto.RegisterType((*ResponseDefinitions)(nil), "openapi.v2.ResponseDefinitions") + proto.RegisterType((*ResponseValue)(nil), "openapi.v2.ResponseValue") + proto.RegisterType((*Responses)(nil), "openapi.v2.Responses") + proto.RegisterType((*Schema)(nil), "openapi.v2.Schema") + proto.RegisterType((*SchemaItem)(nil), "openapi.v2.SchemaItem") + proto.RegisterType((*SecurityDefinitions)(nil), "openapi.v2.SecurityDefinitions") + proto.RegisterType((*SecurityDefinitionsItem)(nil), "openapi.v2.SecurityDefinitionsItem") + proto.RegisterType((*SecurityRequirement)(nil), "openapi.v2.SecurityRequirement") + proto.RegisterType((*StringArray)(nil), "openapi.v2.StringArray") + proto.RegisterType((*Tag)(nil), "openapi.v2.Tag") + proto.RegisterType((*TypeItem)(nil), "openapi.v2.TypeItem") + proto.RegisterType((*VendorExtension)(nil), "openapi.v2.VendorExtension") + proto.RegisterType((*Xml)(nil), "openapi.v2.Xml") +} + +func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 3129 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, + 0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c, + 0xe7, 0xb3, 0x9c, 0x4f, 0x29, 0x48, 0x05, 0x2a, 0x05, 0xf2, 0xab, 0xc6, 0xc4, 0x44, 0x4a, 0xcb, + 0x0e, 0x09, 0x04, 0xba, 0xae, 0x66, 0xee, 0x48, 0x9d, 0x74, 0xf7, 0x6d, 0x77, 0xf7, 0xc8, 0x1a, + 0x16, 0x2c, 0xa0, 0x8a, 0x35, 0x50, 0x59, 0x53, 0x15, 0x16, 0x14, 0x55, 0x59, 0xb0, 0x62, 0xc5, + 0x1f, 0x60, 0xc7, 0x3f, 0x60, 0x0d, 0x5b, 0xaa, 0x58, 0x51, 0x3c, 0xea, 0xbe, 0xfa, 0x31, 0x7d, + 
0x7b, 0x1e, 0x96, 0x0b, 0x28, 0xd0, 0x6a, 0xe6, 0xde, 0x73, 0xee, 0xb9, 0xa7, 0x4f, 0x9f, 0xd7, + 0x3d, 0xe7, 0x36, 0xac, 0xef, 0x79, 0xd8, 0xdd, 0xdd, 0x7f, 0x70, 0xb2, 0x73, 0x2b, 0xfa, 0xb7, + 0xed, 0xf9, 0x24, 0x24, 0x1a, 0x10, 0x0f, 0xbb, 0xc8, 0xb3, 0xb6, 0x4f, 0x76, 0x36, 0xd6, 0x8f, + 0x08, 0x39, 0xb2, 0xf1, 0x2d, 0x06, 0x39, 0x1c, 0x0e, 0x6e, 0x21, 0x77, 0xc4, 0xd1, 0xb6, 0x1c, + 0xd0, 0x77, 0xfb, 0x7d, 0x2b, 0xb4, 0x88, 0x8b, 0xec, 0x7d, 0x9f, 0x78, 0xd8, 0x0f, 0x2d, 0x1c, + 0x3c, 0x08, 0xb1, 0xa3, 0xfd, 0x1f, 0xd4, 0x82, 0xde, 0x31, 0x76, 0x90, 0x5e, 0xbc, 0x52, 0xbc, + 0xb6, 0xb0, 0xa3, 0x6d, 0xc7, 0x34, 0xb7, 0x0f, 0x18, 0xa4, 0x5b, 0x30, 0x04, 0x8e, 0xb6, 0x01, + 0xf5, 0x43, 0x42, 0x6c, 0x8c, 0x5c, 0xbd, 0x74, 0xa5, 0x78, 0xad, 0xd1, 0x2d, 0x18, 0x72, 0xe2, + 0x76, 0x1d, 0xaa, 0xc4, 0xc5, 0x64, 0xb0, 0x75, 0x0f, 0xca, 0xbb, 0xee, 0x48, 0xbb, 0x01, 0xd5, + 0x13, 0x64, 0x0f, 0xb1, 0x20, 0xbc, 0xba, 0xcd, 0x19, 0xdc, 0x96, 0x0c, 0x6e, 0xef, 0xba, 0x23, + 0x83, 0xa3, 0x68, 0x1a, 0x54, 0x46, 0xc8, 0xb1, 0x19, 0xd1, 0xa6, 0xc1, 0xfe, 0x6f, 0x7d, 0x51, + 0x84, 0xf6, 0xae, 0x67, 0xbd, 0x8b, 0x47, 0x07, 0xb8, 0x37, 0xf4, 0xad, 0x70, 0x44, 0xd1, 0xc2, + 0x91, 0xc7, 0x29, 0x36, 0x0d, 0xf6, 0x9f, 0xce, 0xb9, 0xc8, 0xc1, 0x72, 0x29, 0xfd, 0xaf, 0xb5, + 0xa1, 0x64, 0xb9, 0x7a, 0x99, 0xcd, 0x94, 0x2c, 0x57, 0xbb, 0x02, 0x0b, 0x7d, 0x1c, 0xf4, 0x7c, + 0xcb, 0xa3, 0x32, 0xd0, 0x2b, 0x0c, 0x90, 0x9c, 0xd2, 0xbe, 0x06, 0x9d, 0x13, 0xec, 0xf6, 0x89, + 0x6f, 0xe2, 0xd3, 0x10, 0xbb, 0x01, 0x45, 0xab, 0x5e, 0x29, 0x33, 0xbe, 0x13, 0x02, 0x79, 0x0f, + 0x39, 0xb8, 0x4f, 0xf9, 0x5e, 0xe2, 0xd8, 0xf7, 0x24, 0xf2, 0xd6, 0x67, 0x45, 0xd8, 0xbc, 0x8d, + 0x02, 0xab, 0xb7, 0x3b, 0x0c, 0x8f, 0xb1, 0x1b, 0x5a, 0x3d, 0x44, 0x09, 0x4f, 0x64, 0x7d, 0x8c, + 0xad, 0xd2, 0x6c, 0x6c, 0x95, 0xe7, 0x61, 0xeb, 0x0f, 0x45, 0x68, 0xdd, 0x26, 0xfd, 0xd1, 0x3e, + 0xf2, 0x91, 0x83, 0x43, 0xec, 0x8f, 0x6f, 0x5a, 0xcc, 0x6e, 0x3a, 0x8b, 0x44, 0x37, 0xa0, 0xe1, + 0xe3, 0x27, 0x43, 0xcb, 0xc7, 0x7d, 0x26, 0xce, 0x86, 0x11, 0x8d, 0xb5, 0x1b, 0x91, 0x4a, 0x55, + 0xf3, 0x54, 0x2a, 0x52, 0x28, 0xd5, 0x03, 0xd6, 0xe6, 0x79, 0xc0, 0x1f, 0x17, 0xa1, 0x7e, 0x87, + 0xb8, 0x21, 0xea, 0x85, 0x11, 0xe3, 0xc5, 0x04, 0xe3, 0x1d, 0x28, 0x0f, 0x7d, 0xa9, 0x58, 0xf4, + 0xaf, 0xb6, 0x0a, 0x55, 0xec, 0x20, 0xcb, 0x16, 0x4f, 0xc3, 0x07, 0x4a, 0x46, 0x2a, 0xf3, 0x30, + 0xf2, 0x08, 0xea, 0x77, 0xf1, 0x00, 0x0d, 0xed, 0x50, 0x7b, 0x00, 0x17, 0x50, 0x64, 0x6f, 0xa6, + 0x17, 0x19, 0x9c, 0x5e, 0x9c, 0x40, 0x70, 0x15, 0x29, 0x4c, 0x74, 0xeb, 0x3b, 0xb0, 0x70, 0x17, + 0x0f, 0x2c, 0x97, 0x41, 0x02, 0xed, 0xe1, 0x64, 0xca, 0x17, 0x33, 0x94, 0x85, 0xb8, 0xd5, 0xc4, + 0xff, 0x58, 0x85, 0xc6, 0x5d, 0xd2, 0x1b, 0x3a, 0xd8, 0x0d, 0x35, 0x1d, 0xea, 0xc1, 0x53, 0x74, + 0x74, 0x84, 0x7d, 0x21, 0x3f, 0x39, 0xd4, 0x5e, 0x86, 0x8a, 0xe5, 0x0e, 0x08, 0x93, 0xe1, 0xc2, + 0x4e, 0x27, 0xb9, 0xc7, 0x03, 0x77, 0x40, 0x0c, 0x06, 0xa5, 0xc2, 0x3f, 0x26, 0x41, 0x28, 0xa4, + 0xca, 0xfe, 0x6b, 0x9b, 0xd0, 0x3c, 0x44, 0x01, 0x36, 0x3d, 0x14, 0x1e, 0x0b, 0xab, 0x6b, 0xd0, + 0x89, 0x7d, 0x14, 0x1e, 0xb3, 0x0d, 0x29, 0x77, 0x38, 0x60, 0x96, 0x46, 0x37, 0xe4, 0x43, 0xaa, + 0x5c, 0x3d, 0xe2, 0x06, 0x43, 0x0a, 0xaa, 0x31, 0x50, 0x34, 0xa6, 0x30, 0xcf, 0x27, 0xfd, 0x61, + 0x0f, 0x07, 0x7a, 0x9d, 0xc3, 0xe4, 0x58, 0x7b, 0x0d, 0xaa, 0x74, 0xa7, 0x40, 0x6f, 0x30, 0x4e, + 0x97, 0x93, 0x9c, 0xd2, 0x2d, 0x03, 0x83, 0xc3, 0xb5, 0xb7, 0xa9, 0x0d, 0x44, 0x52, 0xd5, 0x9b, + 0x0c, 0x3d, 0x25, 0xbc, 0x84, 0xd0, 0x8d, 0x24, 0xae, 0xf6, 0x75, 0x00, 0x4f, 0xda, 0x52, 0xa0, + 0x03, 0x5b, 0x79, 0x25, 
0xbd, 0x91, 0x80, 0x26, 0x49, 0x24, 0xd6, 0x68, 0xef, 0x40, 0xd3, 0xc7, + 0x81, 0x47, 0xdc, 0x00, 0x07, 0xfa, 0x02, 0x23, 0xf0, 0x62, 0x92, 0x80, 0x21, 0x80, 0xc9, 0xf5, + 0xf1, 0x0a, 0xed, 0xab, 0xd0, 0x08, 0x84, 0x53, 0xd1, 0x17, 0xd9, 0x5b, 0x4f, 0xad, 0x96, 0x0e, + 0xc7, 0xe0, 0xd6, 0x48, 0x5f, 0xad, 0x11, 0x2d, 0xd0, 0x0c, 0x58, 0x95, 0xff, 0xcd, 0xa4, 0x04, + 0x5a, 0x59, 0x36, 0x24, 0xa1, 0x24, 0x1b, 0x2b, 0x41, 0x76, 0x52, 0xbb, 0x0a, 0x95, 0x10, 0x1d, + 0x05, 0x7a, 0x9b, 0x31, 0xb3, 0x94, 0xa4, 0xf1, 0x08, 0x1d, 0x19, 0x0c, 0xa8, 0xbd, 0x03, 0x2d, + 0x6a, 0x57, 0x3e, 0x55, 0xdb, 0x3e, 0xe9, 0x05, 0xfa, 0x12, 0xdb, 0x51, 0x4f, 0x62, 0xdf, 0x13, + 0x08, 0x77, 0x49, 0x2f, 0x30, 0x16, 0x71, 0x62, 0xa4, 0xb4, 0xce, 0xce, 0x3c, 0xd6, 0xf9, 0x18, + 0x1a, 0xf7, 0x4e, 0x91, 0xe3, 0xd9, 0x38, 0x78, 0x9e, 0xe6, 0xf9, 0xa3, 0x22, 0x2c, 0x26, 0xd9, + 0x9e, 0xc1, 0xbb, 0x66, 0x1d, 0xd2, 0x99, 0x9d, 0xfc, 0x3f, 0x4a, 0x00, 0xf7, 0x2d, 0x1b, 0x73, + 0x63, 0xd7, 0xd6, 0xa0, 0x36, 0x20, 0xbe, 0x83, 0x42, 0xb1, 0xbd, 0x18, 0x51, 0xc7, 0x17, 0x5a, + 0xa1, 0x2d, 0x1d, 0x3b, 0x1f, 0x8c, 0x73, 0x5c, 0xce, 0x72, 0x7c, 0x1d, 0xea, 0x7d, 0xee, 0xd9, + 0x98, 0x0d, 0x8f, 0xbd, 0x63, 0xca, 0x91, 0x84, 0xa7, 0xc2, 0x02, 0x37, 0xea, 0x38, 0x2c, 0xc8, + 0x08, 0x58, 0x4b, 0x44, 0xc0, 0x4d, 0x6a, 0x0b, 0xa8, 0x6f, 0x12, 0xd7, 0x1e, 0xe9, 0x75, 0x19, + 0x47, 0x50, 0x7f, 0xcf, 0xb5, 0x47, 0x59, 0x9d, 0x69, 0xcc, 0xa5, 0x33, 0xd7, 0xa1, 0x8e, 0xf9, + 0x2b, 0x17, 0x06, 0x9e, 0x65, 0x5b, 0xc0, 0x95, 0x6f, 0x00, 0xe6, 0x79, 0x03, 0x5f, 0xd4, 0x60, + 0xe3, 0x3e, 0xf1, 0x9d, 0xbb, 0x28, 0x44, 0x91, 0x03, 0x38, 0x18, 0x1e, 0x1e, 0xc8, 0xb4, 0x29, + 0x16, 0x4b, 0x71, 0x2c, 0x5a, 0xf2, 0xc8, 0x5a, 0xca, 0xcb, 0x55, 0xca, 0xf9, 0xf1, 0xb9, 0x92, + 0x08, 0x73, 0x37, 0x60, 0x19, 0xd9, 0x36, 0x79, 0x6a, 0x62, 0xc7, 0x0b, 0x47, 0x26, 0x4f, 0xbc, + 0xaa, 0x6c, 0xab, 0x25, 0x06, 0xb8, 0x47, 0xe7, 0x3f, 0x90, 0xc9, 0x56, 0xe6, 0x45, 0xc4, 0x3a, + 0x53, 0x4f, 0xe9, 0xcc, 0xff, 0x43, 0xd5, 0x0a, 0xb1, 0x23, 0x65, 0xbf, 0x99, 0xf2, 0x74, 0xbe, + 0xe5, 0x58, 0xa1, 0x75, 0xc2, 0x33, 0xc9, 0xc0, 0xe0, 0x98, 0xda, 0xeb, 0xb0, 0xdc, 0x23, 0xb6, + 0x8d, 0x7b, 0x94, 0x59, 0x53, 0x50, 0x6d, 0x32, 0xaa, 0x9d, 0x18, 0x70, 0x9f, 0xd3, 0x4f, 0xe8, + 0x16, 0x4c, 0xd1, 0x2d, 0x1d, 0xea, 0x0e, 0x3a, 0xb5, 0x9c, 0xa1, 0xc3, 0xbc, 0x66, 0xd1, 0x90, + 0x43, 0xba, 0x23, 0x3e, 0xed, 0xd9, 0xc3, 0xc0, 0x3a, 0xc1, 0xa6, 0xc4, 0x59, 0x64, 0x0f, 0xdf, + 0x89, 0x00, 0xdf, 0x14, 0xc8, 0x94, 0x8c, 0xe5, 0x32, 0x94, 0x96, 0x20, 0xc3, 0x87, 0x63, 0x64, + 0x04, 0x4e, 0x7b, 0x9c, 0x8c, 0x40, 0x7e, 0x01, 0xc0, 0x41, 0xa7, 0xa6, 0x8d, 0xdd, 0xa3, 0xf0, + 0x98, 0x79, 0xb3, 0xb2, 0xd1, 0x74, 0xd0, 0xe9, 0x43, 0x36, 0xc1, 0xc0, 0x96, 0x2b, 0xc1, 0x1d, + 0x01, 0xb6, 0x5c, 0x01, 0xd6, 0xa1, 0xee, 0xa1, 0x90, 0x2a, 0xab, 0xbe, 0xcc, 0x83, 0xad, 0x18, + 0x52, 0x8b, 0xa0, 0x74, 0xb9, 0xd0, 0x35, 0xb6, 0xae, 0xe1, 0xa0, 0x53, 0x26, 0x61, 0x06, 0xb4, + 0x5c, 0x01, 0x5c, 0x11, 0x40, 0xcb, 0xe5, 0xc0, 0x97, 0x60, 0x71, 0xe8, 0x5a, 0x4f, 0x86, 0x58, + 0xc0, 0x57, 0x19, 0xe7, 0x0b, 0x7c, 0x8e, 0xa3, 0x5c, 0x85, 0x0a, 0x76, 0x87, 0x8e, 0x7e, 0x21, + 0xeb, 0xaa, 0xa9, 0xa8, 0x19, 0x50, 0x7b, 0x11, 0x16, 0x9c, 0xa1, 0x1d, 0x5a, 0x9e, 0x8d, 0x4d, + 0x32, 0xd0, 0xd7, 0x98, 0x90, 0x40, 0x4e, 0xed, 0x0d, 0x94, 0xd6, 0x72, 0x71, 0x2e, 0x6b, 0xa9, + 0x42, 0xad, 0x8b, 0x51, 0x1f, 0xfb, 0xca, 0xb4, 0x38, 0xd6, 0xc5, 0x92, 0x5a, 0x17, 0xcb, 0x67, + 0xd3, 0xc5, 0xca, 0x74, 0x5d, 0xac, 0xce, 0xae, 0x8b, 0xb5, 0x19, 0x74, 0xb1, 0x3e, 0x5d, 0x17, + 0x1b, 0x33, 0xe8, 0x62, 0x73, 0x26, 0x5d, 0x84, 
0xc9, 0xba, 0xb8, 0x30, 0x41, 0x17, 0x17, 0x27, + 0xe8, 0x62, 0x6b, 0x92, 0x2e, 0xb6, 0xa7, 0xe8, 0xe2, 0x52, 0xbe, 0x2e, 0x76, 0xe6, 0xd0, 0xc5, + 0xe5, 0x8c, 0x2e, 0x8e, 0x79, 0x4b, 0x6d, 0xb6, 0x23, 0xd4, 0xca, 0x3c, 0xda, 0xfa, 0xb7, 0x2a, + 0xe8, 0x5c, 0x5b, 0xff, 0x2d, 0x9e, 0x5d, 0x5a, 0x48, 0x55, 0x69, 0x21, 0x35, 0xb5, 0x85, 0xd4, + 0xcf, 0x66, 0x21, 0x8d, 0xe9, 0x16, 0xd2, 0x9c, 0xdd, 0x42, 0x60, 0x06, 0x0b, 0x59, 0x98, 0x6e, + 0x21, 0x8b, 0x33, 0x58, 0x48, 0x6b, 0x26, 0x0b, 0x69, 0x4f, 0xb6, 0x90, 0xa5, 0x09, 0x16, 0xd2, + 0x99, 0x60, 0x21, 0xcb, 0x93, 0x2c, 0x44, 0x9b, 0x62, 0x21, 0x2b, 0xf9, 0x16, 0xb2, 0x3a, 0x87, + 0x85, 0x5c, 0x98, 0xc9, 0x5b, 0xaf, 0xcd, 0xa3, 0xff, 0xdf, 0x82, 0x3a, 0x57, 0xff, 0x67, 0x38, + 0x7e, 0xf2, 0x85, 0x39, 0xc9, 0xf3, 0xe7, 0x25, 0xa8, 0xd0, 0x03, 0x64, 0x9c, 0x98, 0x16, 0x93, + 0x89, 0xa9, 0x0e, 0xf5, 0x13, 0xec, 0x07, 0x71, 0x65, 0x44, 0x0e, 0x67, 0x30, 0xa4, 0x6b, 0xd0, + 0x09, 0xb1, 0xef, 0x04, 0x26, 0x19, 0x98, 0x01, 0xf6, 0x4f, 0xac, 0x9e, 0x34, 0xaa, 0x36, 0x9b, + 0xdf, 0x1b, 0x1c, 0xf0, 0x59, 0xed, 0x26, 0xd4, 0x7b, 0xbc, 0x7c, 0x20, 0x9c, 0xfe, 0x4a, 0xf2, + 0x21, 0x44, 0x65, 0xc1, 0x90, 0x38, 0x14, 0xdd, 0xb6, 0x7a, 0xd8, 0x0d, 0x78, 0xfa, 0x34, 0x86, + 0xfe, 0x90, 0x83, 0x0c, 0x89, 0xa3, 0x14, 0x7e, 0x7d, 0x1e, 0xe1, 0xbf, 0x05, 0x4d, 0xa6, 0x0c, + 0xac, 0x56, 0x77, 0x23, 0x51, 0xab, 0x2b, 0x4f, 0x2e, 0xac, 0x6c, 0xdd, 0x85, 0xd6, 0x37, 0x02, + 0xe2, 0x1a, 0x78, 0x80, 0x7d, 0xec, 0xf6, 0xb0, 0xb6, 0x0c, 0x15, 0xd3, 0xc7, 0x03, 0x21, 0xe3, + 0xb2, 0x81, 0x07, 0xd3, 0xeb, 0x4f, 0x5b, 0x1e, 0xd4, 0xc5, 0x33, 0xcd, 0x58, 0x5c, 0x39, 0xf3, + 0x59, 0xe6, 0x1e, 0x34, 0x24, 0x50, 0xb9, 0xe5, 0x2b, 0xb2, 0xaa, 0x58, 0x52, 0x3b, 0x20, 0x0e, + 0xdd, 0x7a, 0x17, 0x16, 0x12, 0x0a, 0xa8, 0xa4, 0x74, 0x2d, 0x4d, 0x29, 0x25, 0x4c, 0xa1, 0xb7, + 0x82, 0xd8, 0xfb, 0xd0, 0x66, 0xc4, 0xe2, 0x22, 0x9a, 0x8a, 0xde, 0xeb, 0x69, 0x7a, 0x17, 0x94, + 0x45, 0x01, 0x49, 0x72, 0x0f, 0x5a, 0x82, 0x64, 0x78, 0xcc, 0xde, 0xad, 0x8a, 0xe2, 0x8d, 0x34, + 0xc5, 0xd5, 0xf1, 0x7a, 0x06, 0x5d, 0x38, 0x4e, 0x50, 0x56, 0x0f, 0xe6, 0x26, 0x28, 0x17, 0x4a, + 0x82, 0x1f, 0x81, 0x96, 0x22, 0x18, 0x9d, 0x1d, 0x32, 0x54, 0x6f, 0xa5, 0xa9, 0xae, 0xab, 0xa8, + 0xb2, 0xd5, 0xe3, 0x2f, 0x47, 0xc4, 0xd0, 0x79, 0x5f, 0x8e, 0xd0, 0x74, 0x41, 0xcc, 0x81, 0x4b, + 0x9c, 0x58, 0xb6, 0x34, 0x91, 0x2b, 0xd8, 0xb7, 0xd3, 0xd4, 0xaf, 0x4e, 0xa9, 0x7b, 0x24, 0xe5, + 0xfc, 0x96, 0xe4, 0x3d, 0xf4, 0x2d, 0xf7, 0x48, 0x49, 0x7d, 0x35, 0x49, 0xbd, 0x29, 0x17, 0x3e, + 0x86, 0x4e, 0x62, 0xe1, 0xae, 0xef, 0x23, 0xb5, 0x82, 0xdf, 0x4c, 0xf3, 0x96, 0xf2, 0xa9, 0x89, + 0xb5, 0x92, 0xec, 0x6f, 0xca, 0xd0, 0x79, 0x8f, 0xb8, 0xe9, 0x1a, 0x2f, 0x86, 0xcd, 0x63, 0xa6, + 0xc1, 0x66, 0x54, 0x77, 0x32, 0x83, 0xe1, 0xa1, 0x99, 0xaa, 0xf4, 0xbf, 0x9c, 0x55, 0xf8, 0x6c, + 0x82, 0xd3, 0x2d, 0x18, 0xfa, 0x71, 0x5e, 0xf2, 0x63, 0xc3, 0x65, 0x9a, 0x30, 0x98, 0x7d, 0x14, + 0x22, 0xf5, 0x4e, 0xfc, 0x19, 0x5e, 0x4d, 0xee, 0x94, 0x7f, 0x4c, 0xee, 0x16, 0x8c, 0x8d, 0x41, + 0xfe, 0x21, 0xfa, 0x10, 0x36, 0x9e, 0x0c, 0xb1, 0x3f, 0x52, 0xef, 0x54, 0xce, 0xbe, 0xc9, 0xf7, + 0x29, 0xb6, 0x72, 0x9b, 0x8b, 0x4f, 0xd4, 0x20, 0xcd, 0x84, 0x75, 0x0f, 0x85, 0xc7, 0xea, 0x2d, + 0x78, 0xf1, 0x63, 0x6b, 0xdc, 0x0a, 0x95, 0x3b, 0xac, 0x79, 0x4a, 0x48, 0xdc, 0x24, 0xf9, 0xbc, + 0x04, 0xfa, 0x1e, 0x1a, 0x86, 0xc7, 0x3b, 0xbb, 0xbd, 0x1e, 0x0e, 0x82, 0x3b, 0xa4, 0x8f, 0xa7, + 0xf5, 0x39, 0x06, 0x36, 0x79, 0x2a, 0xab, 0xf2, 0xf4, 0xbf, 0xf6, 0x06, 0x0d, 0x08, 0xc4, 0xc3, + 0xf2, 0x48, 0x94, 0x2a, 0x8d, 0x70, 0xea, 0x07, 0x0c, 0x6e, 0x08, 0x3c, 
0x9a, 0x35, 0xd1, 0x69, + 0xe2, 0x5b, 0xdf, 0x67, 0xfd, 0x09, 0x93, 0xfa, 0x6f, 0x71, 0x20, 0x4a, 0x01, 0x1e, 0xfb, 0x36, + 0x4d, 0x60, 0x42, 0xf2, 0x29, 0xe6, 0x48, 0x3c, 0xff, 0x6c, 0xb0, 0x09, 0x0a, 0x1c, 0x0b, 0x1e, + 0xb5, 0xd9, 0x32, 0xef, 0xb9, 0x82, 0xdf, 0x5f, 0x8a, 0xb0, 0x2e, 0x64, 0xe4, 0x79, 0xf6, 0x2c, + 0x1d, 0x95, 0xe7, 0x23, 0xa4, 0xd4, 0x73, 0x57, 0x26, 0x3f, 0x77, 0x75, 0xb6, 0xe7, 0x9e, 0xab, + 0xa7, 0xf1, 0xc3, 0x12, 0xac, 0x71, 0xc6, 0x1e, 0x38, 0xf4, 0xb9, 0xad, 0xf0, 0x3f, 0x4d, 0x33, + 0xfe, 0x05, 0x42, 0xf8, 0x73, 0x51, 0x0a, 0x61, 0x1f, 0x05, 0xc1, 0x53, 0xe2, 0xf7, 0xff, 0x07, + 0xde, 0xfc, 0xc7, 0xb0, 0x98, 0xe4, 0xeb, 0x19, 0xfa, 0x3d, 0x2c, 0x42, 0xe4, 0x24, 0xdc, 0x3f, + 0xaf, 0x40, 0x73, 0xcf, 0xc3, 0x3e, 0x92, 0x87, 0x4d, 0x56, 0xb7, 0x2f, 0xb2, 0x3a, 0x2d, 0x2f, + 0xd3, 0xeb, 0x50, 0x0f, 0x86, 0x8e, 0x83, 0xfc, 0x91, 0xcc, 0xb9, 0xc5, 0x70, 0x86, 0x9c, 0x3b, + 0x53, 0xae, 0xad, 0xcc, 0x55, 0xae, 0x7d, 0x09, 0x16, 0x89, 0xe4, 0xcd, 0xb4, 0xfa, 0x52, 0xbc, + 0xd1, 0xdc, 0x83, 0x7e, 0xaa, 0xf7, 0x53, 0x1b, 0xeb, 0xfd, 0x24, 0x7b, 0x46, 0xf5, 0xb1, 0x9e, + 0xd1, 0x57, 0x52, 0x3d, 0x9b, 0x06, 0x13, 0xdd, 0x86, 0x32, 0x3d, 0xe3, 0xa1, 0x3e, 0xd9, 0xad, + 0x79, 0x33, 0xd9, 0xad, 0x69, 0x66, 0x33, 0x3b, 0x99, 0xe0, 0xa4, 0x7a, 0x34, 0x89, 0xd6, 0x16, + 0xa4, 0x5b, 0x5b, 0x97, 0x01, 0xfa, 0xd8, 0xf3, 0x71, 0x0f, 0x85, 0xb8, 0x2f, 0x4e, 0xbd, 0x89, + 0x99, 0xb3, 0x75, 0x77, 0x54, 0xea, 0xd7, 0x9a, 0x47, 0xfd, 0x7e, 0x59, 0x84, 0x66, 0x9c, 0x45, + 0xdc, 0x86, 0xf6, 0x21, 0xe9, 0x27, 0xe2, 0xad, 0x48, 0x1c, 0x52, 0x09, 0x5e, 0x2a, 0xf1, 0xe8, + 0x16, 0x8c, 0xd6, 0x61, 0x2a, 0x13, 0x79, 0x08, 0x9a, 0x4b, 0x5c, 0x73, 0x8c, 0x0e, 0x4f, 0x0b, + 0x2e, 0xa5, 0x98, 0x1a, 0xcb, 0x61, 0xba, 0x05, 0xa3, 0xe3, 0x8e, 0xcd, 0xc5, 0xd1, 0xf3, 0x08, + 0x56, 0x55, 0x7d, 0x36, 0x6d, 0x6f, 0xb2, 0xbd, 0x6c, 0x64, 0xc4, 0x10, 0x27, 0xe6, 0x6a, 0x93, + 0xf9, 0xac, 0x08, 0xed, 0xb4, 0x76, 0x68, 0x5f, 0x82, 0xe6, 0xb8, 0x44, 0xd4, 0xb9, 0x7e, 0xb7, + 0x60, 0xc4, 0x98, 0x54, 0x9a, 0x9f, 0x04, 0xc4, 0xa5, 0x67, 0x30, 0x7e, 0x22, 0x53, 0xa5, 0xcb, + 0xa9, 0x23, 0x1b, 0x95, 0xe6, 0x27, 0xc9, 0x89, 0xf8, 0xf9, 0x7f, 0x5f, 0x86, 0x46, 0x74, 0x74, + 0x50, 0x9c, 0xec, 0x5e, 0x83, 0xf2, 0x11, 0x0e, 0x55, 0x27, 0x91, 0xc8, 0xfe, 0x0d, 0x8a, 0x41, + 0x11, 0xbd, 0x61, 0x28, 0xfc, 0x63, 0x1e, 0xa2, 0x37, 0x0c, 0xb5, 0xeb, 0x50, 0xf1, 0x48, 0x20, + 0x3b, 0x40, 0x39, 0x98, 0x0c, 0x45, 0xbb, 0x09, 0xb5, 0x3e, 0xb6, 0x71, 0x88, 0xc5, 0x89, 0x3a, + 0x07, 0x59, 0x20, 0x69, 0xb7, 0xa0, 0x4e, 0x3c, 0xde, 0x86, 0xac, 0x4d, 0xc2, 0x97, 0x58, 0x94, + 0x15, 0x9a, 0x92, 0x8a, 0x22, 0x57, 0x1e, 0x2b, 0x14, 0x85, 0x9e, 0xc9, 0x3c, 0x14, 0xf6, 0x8e, + 0x45, 0xfb, 0x22, 0x07, 0x97, 0xe3, 0x8c, 0xb9, 0x89, 0xe6, 0x5c, 0x6e, 0xe2, 0xcc, 0x1d, 0xa4, + 0xbf, 0x56, 0x61, 0x4d, 0x9d, 0x4d, 0x9e, 0xd7, 0x18, 0xcf, 0x6b, 0x8c, 0xff, 0xed, 0x35, 0xc6, + 0xa7, 0x50, 0x65, 0x17, 0x34, 0x94, 0x94, 0x8a, 0x73, 0x50, 0xd2, 0x6e, 0x42, 0x85, 0xdd, 0x36, + 0x29, 0xb1, 0x45, 0xeb, 0x0a, 0x87, 0x2f, 0xea, 0x26, 0x0c, 0x6d, 0xeb, 0x67, 0x55, 0x58, 0x1a, + 0xd3, 0xda, 0xf3, 0x9e, 0xd4, 0x79, 0x4f, 0xea, 0x4c, 0x3d, 0x29, 0x95, 0x0e, 0x6b, 0xf3, 0x58, + 0xc3, 0xb7, 0x01, 0xe2, 0x14, 0xe4, 0x39, 0xdf, 0xf9, 0xfa, 0x55, 0x0d, 0x2e, 0xe6, 0x14, 0x46, + 0xce, 0xaf, 0x29, 0x9c, 0x5f, 0x53, 0x38, 0xbf, 0xa6, 0x10, 0x9b, 0xe1, 0xdf, 0x8b, 0xd0, 0x88, + 0xca, 0xe9, 0xd3, 0x2f, 0x76, 0x6d, 0x47, 0xdd, 0x19, 0x9e, 0x76, 0xaf, 0x65, 0x6b, 0xd6, 0x2c, + 0xf0, 0xc8, 0xab, 0xaf, 0x37, 0xa1, 0xce, 0x2b, 0xab, 0x32, 0x78, 0xac, 0x64, 0x0b, 0xb2, 0x81, + 
0x21, 0x71, 0xb4, 0x37, 0xa0, 0x21, 0xae, 0x2b, 0xc9, 0x93, 0xf5, 0x6a, 0xfa, 0x64, 0xcd, 0x61, + 0x46, 0x84, 0x75, 0xf6, 0x3b, 0xcd, 0x18, 0x56, 0x14, 0x97, 0x11, 0xb5, 0xf7, 0x26, 0x3b, 0xa4, + 0x6c, 0xcc, 0x8d, 0x5a, 0x0b, 0x6a, 0x97, 0xf4, 0x93, 0x22, 0xb4, 0xd2, 0x5d, 0x86, 0x1d, 0xea, + 0x88, 0xf8, 0x44, 0x74, 0x7b, 0x5c, 0x71, 0xe6, 0xee, 0x16, 0x8c, 0x08, 0xef, 0xf9, 0x9e, 0xaf, + 0x7e, 0x5a, 0x84, 0x66, 0x74, 0xb2, 0xd7, 0xee, 0x40, 0x4b, 0x6e, 0x63, 0xf6, 0x48, 0x1f, 0x8b, + 0x07, 0xbd, 0x9c, 0xfb, 0xa0, 0xbc, 0xdb, 0xb1, 0x28, 0x17, 0xdd, 0x21, 0x7d, 0x75, 0x2b, 0xb0, + 0x34, 0xcf, 0xdb, 0xf8, 0x75, 0x13, 0x6a, 0xc2, 0x51, 0x2b, 0x4e, 0x7c, 0x79, 0x09, 0x4a, 0xd4, + 0x5b, 0x2d, 0x4f, 0xb8, 0xf4, 0x57, 0x99, 0x78, 0xe9, 0x6f, 0x5a, 0xe2, 0x31, 0x66, 0x89, 0xb5, + 0x8c, 0x25, 0x26, 0x5c, 0x62, 0x7d, 0x06, 0x97, 0xd8, 0x98, 0xee, 0x12, 0x9b, 0x33, 0xb8, 0x44, + 0x98, 0xc9, 0x25, 0x2e, 0x4c, 0x76, 0x89, 0x8b, 0x13, 0x5c, 0x62, 0x6b, 0x82, 0x4b, 0x6c, 0x4f, + 0x72, 0x89, 0x4b, 0x53, 0x5c, 0x62, 0x27, 0xeb, 0x12, 0x5f, 0x81, 0x36, 0x25, 0x9e, 0x30, 0x36, + 0x7e, 0x12, 0x68, 0x39, 0xe8, 0x34, 0x91, 0x2b, 0x50, 0x34, 0xcb, 0x4d, 0xa2, 0x69, 0x02, 0xcd, + 0x72, 0x13, 0x68, 0xc9, 0x40, 0xbf, 0x32, 0x76, 0x4d, 0x73, 0xa6, 0x13, 0xc1, 0x47, 0x79, 0x2e, + 0xe0, 0x42, 0xb6, 0xb5, 0x94, 0xf7, 0xe9, 0x89, 0xda, 0x1b, 0x68, 0xd7, 0x44, 0xd8, 0x5f, 0xcb, + 0xda, 0xfd, 0xa3, 0x91, 0x87, 0x79, 0xee, 0xce, 0x92, 0x81, 0xd7, 0x65, 0xd0, 0xbf, 0x98, 0x3d, + 0xdc, 0x47, 0x4d, 0x73, 0x19, 0xee, 0xaf, 0x43, 0x0d, 0xd9, 0x36, 0xd5, 0x4f, 0x3d, 0xb7, 0x77, + 0x5e, 0x45, 0xb6, 0xbd, 0x37, 0xd0, 0xbe, 0x0c, 0x90, 0x78, 0xa2, 0xf5, 0xac, 0x33, 0x8f, 0xb9, + 0x35, 0x12, 0x98, 0xda, 0xcb, 0xd0, 0xea, 0x5b, 0xd4, 0x82, 0x1c, 0xcb, 0x45, 0x21, 0xf1, 0xf5, + 0x0d, 0xa6, 0x20, 0xe9, 0xc9, 0xf4, 0x95, 0xd7, 0xcd, 0xb1, 0x2b, 0xaf, 0x2f, 0x41, 0xf9, 0xd4, + 0xb1, 0xf5, 0x4b, 0x59, 0x8b, 0xfb, 0xd0, 0xb1, 0x0d, 0x0a, 0xcb, 0x96, 0x59, 0x5f, 0x78, 0xd6, + 0x5b, 0xb1, 0x97, 0x9f, 0xe1, 0x56, 0xec, 0x8b, 0xf3, 0x78, 0xac, 0x1f, 0x00, 0xc4, 0x71, 0x6f, + 0xce, 0x2f, 0x8d, 0xde, 0x86, 0x85, 0x81, 0x65, 0x63, 0x33, 0x3f, 0xa4, 0xc6, 0x37, 0x9e, 0xbb, + 0x05, 0x03, 0x06, 0xd1, 0x28, 0xf6, 0xe2, 0x21, 0xac, 0x28, 0xba, 0xb9, 0xda, 0x77, 0x27, 0xc7, + 0xaf, 0x6b, 0xd9, 0x84, 0x3a, 0xa7, 0x25, 0xac, 0x0e, 0x67, 0x7f, 0xaa, 0xc0, 0xc5, 0xbc, 0x66, + 0xb4, 0x03, 0x2f, 0x1c, 0xa2, 0xc0, 0xea, 0x99, 0x28, 0xf5, 0x95, 0x90, 0x19, 0xd5, 0x7c, 0xb9, + 0x68, 0x5e, 0x4b, 0x55, 0x58, 0xf3, 0xbf, 0x2a, 0xea, 0x16, 0x8c, 0xcd, 0xc3, 0x09, 0x1f, 0x1d, + 0xdd, 0x87, 0x0e, 0xf2, 0x2c, 0xf3, 0x53, 0x3c, 0x8a, 0x77, 0xe0, 0x92, 0x4c, 0xd5, 0xb5, 0xd2, + 0x5f, 0x59, 0x75, 0x0b, 0x46, 0x1b, 0xa5, 0xbf, 0xbb, 0xfa, 0x1e, 0xe8, 0x84, 0xb5, 0x25, 0x4c, + 0x4b, 0x34, 0xa4, 0x62, 0x7a, 0xe5, 0x6c, 0x57, 0x54, 0xdd, 0xbb, 0xea, 0x16, 0x8c, 0x35, 0xa2, + 0xee, 0x6a, 0xc5, 0xf4, 0x3d, 0xd1, 0xeb, 0x89, 0xe9, 0x57, 0xf2, 0xe8, 0x8f, 0xb7, 0x85, 0x62, + 0xfa, 0x99, 0x86, 0xd1, 0x11, 0x6c, 0x0a, 0xfa, 0x28, 0x6e, 0x24, 0xc6, 0x5b, 0xf0, 0x00, 0xf7, + 0x4a, 0x76, 0x0b, 0x45, 0xdb, 0xb1, 0x5b, 0x30, 0xd6, 0x49, 0x6e, 0x4f, 0x12, 0xc7, 0x1b, 0xb1, + 0xae, 0x2e, 0x4b, 0x17, 0xe2, 0x8d, 0x6a, 0x59, 0xef, 0x98, 0xd7, 0x03, 0xee, 0x16, 0x0c, 0x21, + 0x93, 0x2c, 0x2c, 0xd6, 0xf0, 0xe3, 0x58, 0xc3, 0x13, 0x2d, 0x01, 0xed, 0xfd, 0xc9, 0x1a, 0x7e, + 0x29, 0xa7, 0x6d, 0xc4, 0x2f, 0x16, 0xa8, 0xb5, 0xfa, 0x2a, 0x2c, 0x24, 0x6f, 0x2e, 0xac, 0xc6, + 0x1f, 0xf7, 0x95, 0xe3, 0x3b, 0x0e, 0xbf, 0x2d, 0x42, 0xf9, 0x11, 0x52, 0xdf, 0x8a, 0x98, 0xfe, + 0xb1, 0x5b, 0xc6, 0xb3, 
0x95, 0xcf, 0xfc, 0x8d, 0xc8, 0x5c, 0x5f, 0x70, 0x5d, 0x81, 0x86, 0x8c, + 0x30, 0x39, 0xcf, 0xf7, 0x31, 0x2c, 0x7d, 0x30, 0x56, 0x6f, 0x7a, 0x8e, 0x1f, 0x93, 0xfc, 0xae, + 0x08, 0xe5, 0x0f, 0x1d, 0x5b, 0x29, 0xbd, 0x4b, 0xd0, 0xa4, 0xbf, 0x81, 0x87, 0x7a, 0xf2, 0x5e, + 0x49, 0x3c, 0x41, 0x93, 0x3f, 0xcf, 0xc7, 0x03, 0xeb, 0x54, 0x64, 0x79, 0x62, 0x44, 0x57, 0xa1, + 0x30, 0xf4, 0xad, 0xc3, 0x61, 0x88, 0xc5, 0x67, 0x7a, 0xf1, 0x04, 0x4d, 0x65, 0x9e, 0xfa, 0xc8, + 0xf3, 0x70, 0x5f, 0x1c, 0xc1, 0xe5, 0xf0, 0xcc, 0x7d, 0xcc, 0xdb, 0xaf, 0x42, 0x9b, 0xf8, 0x47, + 0x12, 0xd7, 0x3c, 0xd9, 0xb9, 0xbd, 0x28, 0xbe, 0x5d, 0xdd, 0xf7, 0x49, 0x48, 0xf6, 0x8b, 0xbf, + 0x28, 0x95, 0xf7, 0x76, 0x0f, 0x0e, 0x6b, 0xec, 0x63, 0xd0, 0x37, 0xff, 0x19, 0x00, 0x00, 0xff, + 0xff, 0xd4, 0x0a, 0xef, 0xca, 0xe4, 0x3a, 0x00, 0x00, +} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto new file mode 100644 index 00000000000..557c88072cd --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto @@ -0,0 +1,663 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +syntax = "proto3"; + +package openapi.v2; + +import "google/protobuf/any.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIProto"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v2"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +option objc_class_prefix = "OAS"; + +message AdditionalPropertiesItem { + oneof oneof { + Schema schema = 1; + bool boolean = 2; + } +} + +message Any { + google.protobuf.Any value = 1; + string yaml = 2; +} + +message ApiKeySecurity { + string type = 1; + string name = 2; + string in = 3; + string description = 4; + repeated NamedAny vendor_extension = 5; +} + +message BasicAuthenticationSecurity { + string type = 1; + string description = 2; + repeated NamedAny vendor_extension = 3; +} + +message BodyParameter { + // A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed. + string description = 1; + // The name of the parameter. + string name = 2; + // Determines the location of the parameter. + string in = 3; + // Determines whether or not this parameter is required or optional. + bool required = 4; + Schema schema = 5; + repeated NamedAny vendor_extension = 6; +} + +// Contact information for the owners of the API. +message Contact { + // The identifying name of the contact person/organization. + string name = 1; + // The URL pointing to the contact information. + string url = 2; + // The email address of the contact person/organization. + string email = 3; + repeated NamedAny vendor_extension = 4; +} + +message Default { + repeated NamedAny additional_properties = 1; +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. +message Definitions { + repeated NamedSchema additional_properties = 1; +} + +message Document { + // The Swagger version of this document. + string swagger = 1; + Info info = 2; + // The host (name or ip) of the API. Example: 'swagger.io' + string host = 3; + // The base path to the API. Example: '/api'. + string base_path = 4; + // The transfer protocol of the API. + repeated string schemes = 5; + // A list of MIME types accepted by the API. + repeated string consumes = 6; + // A list of MIME types the API can produce. + repeated string produces = 7; + Paths paths = 8; + Definitions definitions = 9; + ParameterDefinitions parameters = 10; + ResponseDefinitions responses = 11; + repeated SecurityRequirement security = 12; + SecurityDefinitions security_definitions = 13; + repeated Tag tags = 14; + ExternalDocs external_docs = 15; + repeated NamedAny vendor_extension = 16; +} + +message Examples { + repeated NamedAny additional_properties = 1; +} + +// information about external documentation +message ExternalDocs { + string description = 1; + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// A deterministic version of a JSON Schema object. +message FileSchema { + string format = 1; + string title = 2; + string description = 3; + Any default = 4; + repeated string required = 5; + string type = 6; + bool read_only = 7; + ExternalDocs external_docs = 8; + Any example = 9; + repeated NamedAny vendor_extension = 10; +} + +message FormDataParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Header { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + string description = 18; + repeated NamedAny vendor_extension = 19; +} + +message HeaderParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +message Headers { + repeated NamedHeader additional_properties = 1; +} + +// General information about the API. +message Info { + // A unique and precise title of the API. + string title = 1; + // A semantic version number of the API. + string version = 2; + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + string description = 3; + // The terms of service for the API. + string terms_of_service = 4; + Contact contact = 5; + License license = 6; + repeated NamedAny vendor_extension = 7; +} + +message ItemsItem { + repeated Schema schema = 1; +} + +message JsonReference { + string _ref = 1; + string description = 2; +} + +message License { + // The name of the license type. It's encouraged to use an OSI compatible license. + string name = 1; + // The URL pointing to the license. + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +message NamedAny { + // Map key + string name = 1; + // Mapped value + Any value = 2; +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +message NamedHeader { + // Map key + string name = 1; + // Mapped value + Header value = 2; +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +message NamedParameter { + // Map key + string name = 1; + // Mapped value + Parameter value = 2; +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
+message NamedPathItem { + // Map key + string name = 1; + // Mapped value + PathItem value = 2; +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +message NamedResponse { + // Map key + string name = 1; + // Mapped value + Response value = 2; +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. +message NamedResponseValue { + // Map key + string name = 1; + // Mapped value + ResponseValue value = 2; +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +message NamedSchema { + // Map key + string name = 1; + // Mapped value + Schema value = 2; +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. +message NamedSecurityDefinitionsItem { + // Map key + string name = 1; + // Mapped value + SecurityDefinitionsItem value = 2; +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +message NamedString { + // Map key + string name = 1; + // Mapped value + string value = 2; +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +message NamedStringArray { + // Map key + string name = 1; + // Mapped value + StringArray value = 2; +} + +message NonBodyParameter { + oneof oneof { + HeaderParameterSubSchema header_parameter_sub_schema = 1; + FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + QueryParameterSubSchema query_parameter_sub_schema = 3; + PathParameterSubSchema path_parameter_sub_schema = 4; + } +} + +message Oauth2AccessCodeSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string token_url = 5; + string description = 6; + repeated NamedAny vendor_extension = 7; +} + +message Oauth2ApplicationSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2ImplicitSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2PasswordSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2Scopes { + repeated NamedString additional_properties = 1; +} + +message Operation { + repeated string tags = 1; + // A brief summary of the operation. + string summary = 2; + // A longer description of the operation, GitHub Flavored Markdown is allowed. + string description = 3; + ExternalDocs external_docs = 4; + // A unique identifier of the operation. + string operation_id = 5; + // A list of MIME types the API can produce. + repeated string produces = 6; + // A list of MIME types the API can consume. + repeated string consumes = 7; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 8; + Responses responses = 9; + // The transfer protocol of the API. 
+ repeated string schemes = 10; + bool deprecated = 11; + repeated SecurityRequirement security = 12; + repeated NamedAny vendor_extension = 13; +} + +message Parameter { + oneof oneof { + BodyParameter body_parameter = 1; + NonBodyParameter non_body_parameter = 2; + } +} + +// One or more JSON representations for parameters +message ParameterDefinitions { + repeated NamedParameter additional_properties = 1; +} + +message ParametersItem { + oneof oneof { + Parameter parameter = 1; + JsonReference json_reference = 2; + } +} + +message PathItem { + string _ref = 1; + Operation get = 2; + Operation put = 3; + Operation post = 4; + Operation delete = 5; + Operation options = 6; + Operation head = 7; + Operation patch = 8; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 9; + repeated NamedAny vendor_extension = 10; +} + +message PathParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. +message Paths { + repeated NamedAny vendor_extension = 1; + repeated NamedPathItem path = 2; +} + +message PrimitivesItems { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + repeated NamedAny vendor_extension = 18; +} + +message Properties { + repeated NamedSchema additional_properties = 1; +} + +message QueryParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Response { + string description = 1; + SchemaItem schema = 2; + Headers headers = 3; + Examples examples = 4; + repeated NamedAny vendor_extension = 5; +} + +// One or more JSON representations for parameters +message ResponseDefinitions { + repeated NamedResponse additional_properties = 1; +} + +message ResponseValue { + oneof oneof { + Response response = 1; + JsonReference json_reference = 2; + } +} + +// Response objects names can either be any valid HTTP status code or 'default'. +message Responses { + repeated NamedResponseValue response_code = 1; + repeated NamedAny vendor_extension = 2; +} + +// A deterministic version of a JSON Schema object. +message Schema { + string _ref = 1; + string format = 2; + string title = 3; + string description = 4; + Any default = 5; + double multiple_of = 6; + double maximum = 7; + bool exclusive_maximum = 8; + double minimum = 9; + bool exclusive_minimum = 10; + int64 max_length = 11; + int64 min_length = 12; + string pattern = 13; + int64 max_items = 14; + int64 min_items = 15; + bool unique_items = 16; + int64 max_properties = 17; + int64 min_properties = 18; + repeated string required = 19; + repeated Any enum = 20; + AdditionalPropertiesItem additional_properties = 21; + TypeItem type = 22; + ItemsItem items = 23; + repeated Schema all_of = 24; + Properties properties = 25; + string discriminator = 26; + bool read_only = 27; + Xml xml = 28; + ExternalDocs external_docs = 29; + Any example = 30; + repeated NamedAny vendor_extension = 31; +} + +message SchemaItem { + oneof oneof { + Schema schema = 1; + FileSchema file_schema = 2; + } +} + +message SecurityDefinitions { + repeated NamedSecurityDefinitionsItem additional_properties = 1; +} + +message SecurityDefinitionsItem { + oneof oneof { + BasicAuthenticationSecurity basic_authentication_security = 1; + ApiKeySecurity api_key_security = 2; + Oauth2ImplicitSecurity oauth2_implicit_security = 3; + Oauth2PasswordSecurity oauth2_password_security = 4; + Oauth2ApplicationSecurity oauth2_application_security = 5; + Oauth2AccessCodeSecurity oauth2_access_code_security = 6; + } +} + +message SecurityRequirement { + repeated NamedStringArray additional_properties = 1; +} + +message StringArray { + repeated string value = 1; +} + +message Tag { + string name = 1; + string description = 2; + ExternalDocs external_docs = 3; + repeated NamedAny vendor_extension = 4; +} + +message TypeItem { + repeated string value = 1; +} + +// Any property starting with x- is valid. 
+message VendorExtension { + repeated NamedAny additional_properties = 1; +} + +message Xml { + string name = 1; + string namespace = 2; + string prefix = 3; + bool attribute = 4; + bool wrapped = 5; + repeated NamedAny vendor_extension = 6; +} + diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md new file mode 100644 index 00000000000..836fb32a7ef --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md @@ -0,0 +1,16 @@ +# OpenAPI v2 Protocol Buffer Models + +This directory contains a Protocol Buffer-language model +and related code for supporting OpenAPI v2. + +Gnostic applications and plugins can use OpenAPIv2.proto +to generate Protocol Buffer support code for their preferred languages. + +OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI +descriptions into the Protocol Buffer-based datastructures +generated from OpenAPIv2.proto. + +OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic +compiler generator, and OpenAPIv2.pb.go is generated by +protoc, the Protocol Buffer compiler, and protoc-gen-go, the +Protocol Buffer Go code generation plugin. diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json new file mode 100644 index 00000000000..2815a26ea71 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json @@ -0,0 +1,1610 @@ +{ + "title": "A JSON Schema for Swagger 2.0 API.", + "id": "http://swagger.io/v2/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "swagger", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "swagger": { + "type": "string", + "enum": [ + "2.0" + ], + "description": "The Swagger version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "host": { + "type": "string", + "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", + "description": "The host (name or ip) of the API. Example: 'swagger.io'" + }, + "basePath": { + "type": "string", + "pattern": "^/", + "description": "The base path to the API. Example: '/api'." 
+ }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "consumes": { + "description": "A list of MIME types accepted by the API.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "definitions": { + "$ref": "#/definitions/definitions" + }, + "parameters": { + "$ref": "#/definitions/parameterDefinitions" + }, + "responses": { + "$ref": "#/definitions/responseDefinitions" + }, + "security": { + "$ref": "#/definitions/security" + }, + "securityDefinitions": { + "$ref": "#/definitions/securityDefinitions" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." + }, + "termsOfService": { + "type": "string", + "description": "The terms of service for the API." + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." 
+ }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for parameters" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + 
"type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + }, + "description": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/googleapis/gnostic/README.md b/vendor/github.com/googleapis/gnostic/README.md new file mode 100644 index 00000000000..d350f3f0134 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/README.md @@ -0,0 +1,103 @@ +[![Build 
Status](https://travis-ci.org/googleapis/gnostic.svg?branch=master)](https://travis-ci.org/googleapis/gnostic) + +# ⨁ gnostic + +This repository contains a Go command line tool which converts +JSON and YAML [OpenAPI](https://github.com/OAI/OpenAPI-Specification) +descriptions to and from equivalent Protocol Buffer representations. + +[Protocol Buffers](https://developers.google.com/protocol-buffers/) +provide a language-neutral, platform-neutral, extensible mechanism +for serializing structured data. +**gnostic**'s Protocol Buffer models for the OpenAPI Specification +can be used to generate code that includes data structures with +explicit fields for the elements of an OpenAPI description. +This makes it possible for developers to work with OpenAPI +descriptions in type-safe ways, which is particularly useful +in strongly-typed languages like Go and Swift. + +**gnostic** reads OpenAPI descriptions into +these generated data structures, reports errors, +resolves internal dependencies, and writes the results +in a binary form that can be used in any language that is +supported by the Protocol Buffer tools. +A plugin interface simplifies integration with API +tools written in a variety of different languages, +and when necessary, Protocol Buffer OpenAPI descriptions +can be reexported as JSON or YAML. + +**gnostic** compilation code and OpenAPI Protocol Buffer +models are automatically generated from an +[OpenAPI JSON Schema](https://github.com/OAI/OpenAPI-Specification/blob/master/schemas/v2.0/schema.json). +Source code for the generator is in the [generate-gnostic](generate-gnostic) directory. + +## Disclaimer + +This is prerelease software and work in progress. Feedback and +contributions are welcome, but we currently make no guarantees of +function or stability. + +## Requirements + +**gnostic** can be run in any environment that supports [Go](http://golang.org) +and the [Google Protocol Buffer Compiler](https://github.com/google/protobuf). + +## Installation + +1. Get this package by downloading it with `go get`. + + go get github.com/googleapis/gnostic + +2. [Optional] Build and run the compiler generator. +This uses the OpenAPI JSON schema to generate a Protocol Buffer language file +that describes the OpenAPI specification and a Go-language file of code that +will read a JSON or YAML OpenAPI representation into the generated protocol +buffers. Pre-generated versions of these files are in the OpenAPIv2 directory. + + cd $GOPATH/src/github.com/googleapis/gnostic/generate-gnostic + go install + cd .. + generate-gnostic --v2 + +3. [Optional] Generate Protocol Buffer support code. +A pre-generated version of this file is checked into the OpenAPIv2 directory. +This step requires a local installation of protoc, the Protocol Buffer Compiler. +You can get protoc [here](https://github.com/google/protobuf). + + ./COMPILE-PROTOS.sh + +4. [Optional] Rebuild **gnostic**. This is only necessary if you've performed steps +2 or 3 above. + + go install github.com/googleapis/gnostic + +5. Run **gnostic**. This will create a file in the current directory named "petstore.pb" that contains a binary +Protocol Buffer description of a sample API. + + gnostic --pb-out=. examples/petstore.json + +6. You can also compile files that you specify with a URL. Here's another way to compile the previous +example. This time we're creating "petstore.text", which contains a textual representation of the +Protocol Buffer description. This is mainly for use in testing and debugging. 
+ + gnostic --text-out=petstore.text https://raw.githubusercontent.com/googleapis/gnostic/master/examples/petstore.json + +7. For a sample application, see apps/report. + + go install github.com/googleapis/gnostic/apps/report + report petstore.pb + +8. **gnostic** supports plugins. This builds and runs a sample plugin +that reports some basic information about an API. The "-" causes the plugin to +write its output to stdout. + + go install github.com/googleapis/gnostic/plugins/gnostic-go-sample + gnostic examples/petstore.json --go-sample-out=- + +## Copyright + +Copyright 2017, Google Inc. + +## License + +Released under the Apache 2.0 license. diff --git a/vendor/github.com/googleapis/gnostic/compiler/README.md b/vendor/github.com/googleapis/gnostic/compiler/README.md new file mode 100644 index 00000000000..848b16c69b8 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/README.md @@ -0,0 +1,3 @@ +# Compiler support code + +This directory contains compiler support code used by Gnostic and Gnostic extensions. \ No newline at end of file diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/googleapis/gnostic/compiler/context.go new file mode 100644 index 00000000000..a64c1b75db4 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/context.go @@ -0,0 +1,43 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +// Context contains state of the compiler as it traverses a document. +type Context struct { + Parent *Context + Name string + ExtensionHandlers *[]ExtensionHandler +} + +// NewContextWithExtensions returns a new object representing the compiler state +func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { + return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers} +} + +// NewContext returns a new object representing the compiler state +func NewContext(name string, parent *Context) *Context { + if parent != nil { + return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} + } + return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} +} + +// Description returns a text description of the compiler state +func (context *Context) Description() string { + if context.Parent != nil { + return context.Parent.Description() + "." + context.Name + } + return context.Name +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/googleapis/gnostic/compiler/error.go new file mode 100644 index 00000000000..d8672c1008b --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/error.go @@ -0,0 +1,61 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +// Error represents compiler errors and their location in the document. +type Error struct { + Context *Context + Message string +} + +// NewError creates an Error. +func NewError(context *Context, message string) *Error { + return &Error{Context: context, Message: message} +} + +// Error returns the string value of an Error. +func (err *Error) Error() string { + if err.Context == nil { + return "ERROR " + err.Message + } + return "ERROR " + err.Context.Description() + " " + err.Message +} + +// ErrorGroup is a container for groups of Error values. +type ErrorGroup struct { + Errors []error +} + +// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty. +func NewErrorGroupOrNil(errors []error) error { + if len(errors) == 0 { + return nil + } else if len(errors) == 1 { + return errors[0] + } else { + return &ErrorGroup{Errors: errors} + } +} + +func (group *ErrorGroup) Error() string { + result := "" + for i, err := range group.Errors { + if i > 0 { + result += "\n" + } + result += err.Error() + } + return result +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go new file mode 100644 index 00000000000..1f85b650e81 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go @@ -0,0 +1,101 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "bytes" + "fmt" + "os/exec" + + "strings" + + "errors" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + ext_plugin "github.com/googleapis/gnostic/extensions" + yaml "gopkg.in/yaml.v2" +) + +// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. +type ExtensionHandler struct { + Name string +} + +// HandleExtension calls a binary extension handler. 
+func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) { + handled := false + var errFromPlugin error + var outFromPlugin *any.Any + + if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 { + for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) { + outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName) + if outFromPlugin == nil { + continue + } else { + handled = true + break + } + } + } + return handled, outFromPlugin, errFromPlugin +} + +func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) { + if extensionHandlers.Name != "" { + binary, _ := yaml.Marshal(in) + + request := &ext_plugin.ExtensionHandlerRequest{} + + version := &ext_plugin.Version{} + version.Major = 0 + version.Minor = 1 + version.Patch = 0 + request.CompilerVersion = version + + request.Wrapper = &ext_plugin.Wrapper{} + + request.Wrapper.Version = "v2" + request.Wrapper.Yaml = string(binary) + request.Wrapper.ExtensionName = extensionName + + requestBytes, _ := proto.Marshal(request) + cmd := exec.Command(extensionHandlers.Name) + cmd.Stdin = bytes.NewReader(requestBytes) + output, err := cmd.Output() + + if err != nil { + fmt.Printf("Error: %+v\n", err) + return nil, err + } + response := &ext_plugin.ExtensionHandlerResponse{} + err = proto.Unmarshal(output, response) + if err != nil { + fmt.Printf("Error: %+v\n", err) + fmt.Printf("%s\n", string(output)) + return nil, err + } + if !response.Handled { + return nil, nil + } + if len(response.Error) != 0 { + message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ",")) + return nil, errors.New(message) + } + return response.Value, nil + } + return nil, nil +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/googleapis/gnostic/compiler/helpers.go new file mode 100644 index 00000000000..76df635ff9b --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/helpers.go @@ -0,0 +1,197 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "fmt" + "gopkg.in/yaml.v2" + "regexp" + "sort" + "strconv" +) + +// compiler helper functions, usually called from generated code + +// UnpackMap gets a yaml.MapSlice if possible. +func UnpackMap(in interface{}) (yaml.MapSlice, bool) { + m, ok := in.(yaml.MapSlice) + if ok { + return m, true + } + // do we have an empty array? + a, ok := in.([]interface{}) + if ok && len(a) == 0 { + // if so, return an empty map + return yaml.MapSlice{}, true + } + return nil, false +} + +// SortedKeysForMap returns the sorted keys of a yaml.MapSlice. 
+func SortedKeysForMap(m yaml.MapSlice) []string { + keys := make([]string, 0) + for _, item := range m { + keys = append(keys, item.Key.(string)) + } + sort.Strings(keys) + return keys +} + +// MapHasKey returns true if a yaml.MapSlice contains a specified key. +func MapHasKey(m yaml.MapSlice, key string) bool { + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok && key == itemKey { + return true + } + } + return false +} + +// MapValueForKey gets the value of a map value for a specified key. +func MapValueForKey(m yaml.MapSlice, key string) interface{} { + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok && key == itemKey { + return item.Value + } + } + return nil +} + +// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible. +func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { + stringArray := make([]string, 0) + for _, item := range interfaceArray { + v, ok := item.(string) + if ok { + stringArray = append(stringArray, v) + } + } + return stringArray +} + +// MissingKeysInMap identifies which keys from a list of required keys are not in a map. +func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string { + missingKeys := make([]string, 0) + for _, k := range requiredKeys { + if !MapHasKey(m, k) { + missingKeys = append(missingKeys, k) + } + } + return missingKeys +} + +// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. +func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { + invalidKeys := make([]string, 0) + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok { + key := itemKey + found := false + // does the key match an allowed key? + for _, allowedKey := range allowedKeys { + if key == allowedKey { + found = true + break + } + } + if !found { + // does the key match an allowed pattern? + for _, allowedPattern := range allowedPatterns { + if allowedPattern.MatchString(key) { + found = true + break + } + } + if !found { + invalidKeys = append(invalidKeys, key) + } + } + } + } + return invalidKeys +} + +// DescribeMap describes a map (for debugging purposes). +func DescribeMap(in interface{}, indent string) string { + description := "" + m, ok := in.(map[string]interface{}) + if ok { + keys := make([]string, 0) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := m[k] + description += fmt.Sprintf("%s%s:\n", indent, k) + description += DescribeMap(v, indent+" ") + } + return description + } + a, ok := in.([]interface{}) + if ok { + for i, v := range a { + description += fmt.Sprintf("%s%d:\n", indent, i) + description += DescribeMap(v, indent+" ") + } + return description + } + description += fmt.Sprintf("%s%+v\n", indent, in) + return description +} + +// PluralProperties returns the string "properties" pluralized. +func PluralProperties(count int) string { + if count == 1 { + return "property" + } + return "properties" +} + +// StringArrayContainsValue returns true if a string array contains a specified value. +func StringArrayContainsValue(array []string, value string) bool { + for _, item := range array { + if item == value { + return true + } + } + return false +} + +// StringArrayContainsValues returns true if a string array contains all of a list of specified values. 
+func StringArrayContainsValues(array []string, values []string) bool { + for _, value := range values { + if !StringArrayContainsValue(array, value) { + return false + } + } + return true +} + +// StringValue returns the string value of an item. +func StringValue(item interface{}) (value string, ok bool) { + value, ok = item.(string) + if ok { + return value, ok + } + intValue, ok := item.(int) + if ok { + return strconv.Itoa(intValue), true + } + return "", false +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go new file mode 100644 index 00000000000..9713a21cca2 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/main.go @@ -0,0 +1,16 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package compiler provides support functions to generated compiler code. +package compiler diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go new file mode 100644 index 00000000000..604a46a6a1e --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go @@ -0,0 +1,167 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "fmt" + "gopkg.in/yaml.v2" + "io/ioutil" + "log" + "net/http" + "net/url" + "path/filepath" + "strings" +) + +var fileCache map[string][]byte +var infoCache map[string]interface{} +var count int64 + +var verboseReader = false + +func initializeFileCache() { + if fileCache == nil { + fileCache = make(map[string][]byte, 0) + } +} + +func initializeInfoCache() { + if infoCache == nil { + infoCache = make(map[string]interface{}, 0) + } +} + +// FetchFile gets a specified file from the local filesystem or a remote location. +func FetchFile(fileurl string) ([]byte, error) { + initializeFileCache() + bytes, ok := fileCache[fileurl] + if ok { + if verboseReader { + log.Printf("Cache hit %s", fileurl) + } + return bytes, nil + } + log.Printf("Fetching %s", fileurl) + response, err := http.Get(fileurl) + if err != nil { + return nil, err + } + defer response.Body.Close() + bytes, err = ioutil.ReadAll(response.Body) + if err == nil { + fileCache[fileurl] = bytes + } + return bytes, err +} + +// ReadBytesForFile reads the bytes of a file. +func ReadBytesForFile(filename string) ([]byte, error) { + // is the filename a url? 
+ fileurl, _ := url.Parse(filename) + if fileurl.Scheme != "" { + // yes, fetch it + bytes, err := FetchFile(filename) + if err != nil { + return nil, err + } + return bytes, nil + } + // no, it's a local filename + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return bytes, nil +} + +// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice. +func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { + initializeInfoCache() + cachedInfo, ok := infoCache[filename] + if ok { + if verboseReader { + log.Printf("Cache hit info for file %s", filename) + } + return cachedInfo, nil + } + if verboseReader { + log.Printf("Reading info for file %s", filename) + } + var info yaml.MapSlice + err := yaml.Unmarshal(bytes, &info) + if err != nil { + return nil, err + } + infoCache[filename] = info + return info, nil +} + +// ReadInfoForRef reads a file and return the fragment needed to resolve a $ref. +func ReadInfoForRef(basefile string, ref string) (interface{}, error) { + initializeInfoCache() + { + info, ok := infoCache[ref] + if ok { + if verboseReader { + log.Printf("Cache hit for ref %s#%s", basefile, ref) + } + return info, nil + } + } + if verboseReader { + log.Printf("Reading info for ref %s#%s", basefile, ref) + } + count = count + 1 + basedir, _ := filepath.Split(basefile) + parts := strings.Split(ref, "#") + var filename string + if parts[0] != "" { + filename = basedir + parts[0] + } else { + filename = basefile + } + bytes, err := ReadBytesForFile(filename) + if err != nil { + return nil, err + } + info, err := ReadInfoFromBytes(filename, bytes) + if err != nil { + log.Printf("File error: %v\n", err) + } else { + if len(parts) > 1 { + path := strings.Split(parts[1], "/") + for i, key := range path { + if i > 0 { + m, ok := info.(yaml.MapSlice) + if ok { + found := false + for _, section := range m { + if section.Key == key { + info = section.Value + found = true + } + } + if !found { + infoCache[ref] = nil + return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) + } + } + } + } + } + } + infoCache[ref] = info + return info, nil +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh new file mode 100755 index 00000000000..68d02a02ac5 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh @@ -0,0 +1,5 @@ +go get github.com/golang/protobuf/protoc-gen-go + +protoc \ +--go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. *.proto + diff --git a/vendor/github.com/googleapis/gnostic/extensions/README.md b/vendor/github.com/googleapis/gnostic/extensions/README.md new file mode 100644 index 00000000000..ff1c2eb1e3a --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/README.md @@ -0,0 +1,5 @@ +# Extensions + +This directory contains support code for building Gnostic extensions and associated examples. + +Extensions are used to compile vendor or specification extensions into protocol buffer structures. diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go new file mode 100644 index 00000000000..b14f1f945fc --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go @@ -0,0 +1,219 @@ +// Code generated by protoc-gen-go. +// source: extension.proto +// DO NOT EDIT! + +/* +Package openapiextension_v1 is a generated protocol buffer package. 
+ +It is generated from these files: + extension.proto + +It has these top-level messages: + Version + ExtensionHandlerRequest + ExtensionHandlerResponse + Wrapper +*/ +package openapiextension_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The version number of OpenAPI compiler. +type Version struct { + Major int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + Suffix string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Version) GetMajor() int32 { + if m != nil { + return m.Major + } + return 0 +} + +func (m *Version) GetMinor() int32 { + if m != nil { + return m.Minor + } + return 0 +} + +func (m *Version) GetPatch() int32 { + if m != nil { + return m.Patch + } + return 0 +} + +func (m *Version) GetSuffix() string { + if m != nil { + return m.Suffix + } + return "" +} + +// An encoded Request is written to the ExtensionHandler's stdin. +type ExtensionHandlerRequest struct { + // The OpenAPI descriptions that were explicitly listed on the command line. + // The specifications will appear in the order they are specified to openapic. + Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"` + // The version number of openapi compiler. + CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` +} + +func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} } +func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerRequest) ProtoMessage() {} +func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper { + if m != nil { + return m.Wrapper + } + return nil +} + +func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version { + if m != nil { + return m.CompilerVersion + } + return nil +} + +// The extensions writes an encoded ExtensionHandlerResponse to stdout. +type ExtensionHandlerResponse struct { + // true if the extension is handled by the extension handler; false otherwise + Handled bool `protobuf:"varint,1,opt,name=handled" json:"handled,omitempty"` + // Error message. If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. 
+ // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. + Error []string `protobuf:"bytes,2,rep,name=error" json:"error,omitempty"` + // text output + Value *google_protobuf.Any `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` +} + +func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} } +func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerResponse) ProtoMessage() {} +func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ExtensionHandlerResponse) GetHandled() bool { + if m != nil { + return m.Handled + } + return false +} + +func (m *ExtensionHandlerResponse) GetError() []string { + if m != nil { + return m.Error + } + return nil +} + +func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any { + if m != nil { + return m.Value + } + return nil +} + +type Wrapper struct { + // version of the OpenAPI specification in which this extension was written. + Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // Name of the extension + ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName" json:"extension_name,omitempty"` + // Must be a valid yaml for the proto + Yaml string `protobuf:"bytes,3,opt,name=yaml" json:"yaml,omitempty"` +} + +func (m *Wrapper) Reset() { *m = Wrapper{} } +func (m *Wrapper) String() string { return proto.CompactTextString(m) } +func (*Wrapper) ProtoMessage() {} +func (*Wrapper) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Wrapper) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Wrapper) GetExtensionName() string { + if m != nil { + return m.ExtensionName + } + return "" +} + +func (m *Wrapper) GetYaml() string { + if m != nil { + return m.Yaml + } + return "" +} + +func init() { + proto.RegisterType((*Version)(nil), "openapiextension.v1.Version") + proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest") + proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse") + proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper") +} + +func init() { proto.RegisterFile("extension.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 355 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xf3, 0x40, + 0x1c, 0xc4, 0x49, 0xdf, 0xf2, 0x64, 0x1f, 0xb4, 0xb2, 0x16, 0x8d, 0xe2, 0xa1, 0x04, 0x84, 0x22, + 0xb8, 0xa5, 0x0a, 0xde, 0x5b, 0x28, 0xea, 0xc5, 0x96, 0x3d, 0xd4, 0x9b, 0x65, 0x9b, 0xfe, 0xdb, + 0x46, 0x92, 0xdd, 0x75, 0xf3, 0x62, 0xfb, 0x55, 0x3c, 0xfa, 0x49, 0x25, 0xbb, 0xd9, 0x7a, 0x50, + 0x6f, 0x99, 0x1f, 0x93, 0xfc, 0x67, 0x26, 0xa8, 0x0d, 0xdb, 0x0c, 0x78, 0x1a, 0x09, 0x4e, 0xa4, + 0x12, 0x99, 0xc0, 0xc7, 0x42, 0x02, 0x67, 0x32, 0xfa, 0xe6, 0xc5, 0xe0, 0xfc, 0x6c, 0x2d, 0xc4, + 0x3a, 0x86, 0xbe, 0xb6, 0x2c, 0xf2, 0x55, 0x9f, 0xf1, 0x9d, 0xf1, 0x07, 0x21, 0x72, 0x67, 0xa0, + 0x4a, 0x23, 0xee, 0xa0, 0x66, 0xc2, 0x5e, 0x85, 0xf2, 0x9d, 0xae, 0xd3, 0x6b, 0x52, 0x23, 0x34, + 0x8d, 0xb8, 0x50, 0x7e, 0xad, 0xa2, 0xa5, 0x28, 0xa9, 0x64, 0x59, 0xb8, 0xf1, 0xeb, 0x86, 
0x6a, + 0x81, 0x4f, 0x50, 0x2b, 0xcd, 0x57, 0xab, 0x68, 0xeb, 0x37, 0xba, 0x4e, 0xcf, 0xa3, 0x95, 0x0a, + 0x3e, 0x1c, 0x74, 0x3a, 0xb6, 0x81, 0x1e, 0x18, 0x5f, 0xc6, 0xa0, 0x28, 0xbc, 0xe5, 0x90, 0x66, + 0xf8, 0x0e, 0xb9, 0xef, 0x8a, 0x49, 0x09, 0xe6, 0xee, 0xff, 0x9b, 0x0b, 0xf2, 0x4b, 0x05, 0xf2, + 0x6c, 0x3c, 0xd4, 0x9a, 0xf1, 0x3d, 0x3a, 0x0a, 0x45, 0x22, 0xa3, 0x18, 0xd4, 0xbc, 0x30, 0x0d, + 0x74, 0x98, 0xbf, 0x3e, 0x50, 0xb5, 0xa4, 0x6d, 0xfb, 0x56, 0x05, 0x82, 0x02, 0xf9, 0x3f, 0xb3, + 0xa5, 0x52, 0xf0, 0x14, 0xb0, 0x8f, 0xdc, 0x8d, 0x46, 0x4b, 0x1d, 0xee, 0x1f, 0xb5, 0xb2, 0x1c, + 0x00, 0x94, 0xd2, 0xb3, 0xd4, 0x7b, 0x1e, 0x35, 0x02, 0x5f, 0xa1, 0x66, 0xc1, 0xe2, 0x1c, 0xaa, + 0x24, 0x1d, 0x62, 0x86, 0x27, 0x76, 0x78, 0x32, 0xe4, 0x3b, 0x6a, 0x2c, 0xc1, 0x0b, 0x72, 0xab, + 0x52, 0xe5, 0x19, 0x5b, 0xc1, 0xd1, 0xc3, 0x59, 0x89, 0x2f, 0xd1, 0xe1, 0xbe, 0xc5, 0x9c, 0xb3, + 0x04, 0xf4, 0x6f, 0xf0, 0xe8, 0xc1, 0x9e, 0x3e, 0xb1, 0x04, 0x30, 0x46, 0x8d, 0x1d, 0x4b, 0x62, + 0x7d, 0xd6, 0xa3, 0xfa, 0x79, 0x74, 0x8d, 0xda, 0x42, 0xad, 0xed, 0x16, 0x21, 0x29, 0x06, 0x23, + 0x3c, 0x91, 0xc0, 0x87, 0xd3, 0xc7, 0x7d, 0xdf, 0xd9, 0x60, 0xea, 0x7c, 0xd6, 0xea, 0x93, 0xe1, + 0x78, 0xd1, 0xd2, 0x19, 0x6f, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x56, 0x40, 0x4d, 0x52, + 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto new file mode 100644 index 00000000000..806760a1329 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.proto @@ -0,0 +1,93 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +import "google/protobuf/any.proto"; +package openapiextension.v1; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIExtensionV1"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapic.v1"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +// +option objc_class_prefix = "OAE"; // "OpenAPI Extension" + +// The version number of OpenAPI compiler. 
+message Version { + int32 major = 1; + int32 minor = 2; + int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + string suffix = 4; +} + +// An encoded Request is written to the ExtensionHandler's stdin. +message ExtensionHandlerRequest { + + // The OpenAPI descriptions that were explicitly listed on the command line. + // The specifications will appear in the order they are specified to openapic. + Wrapper wrapper = 1; + + // The version number of openapi compiler. + Version compiler_version = 3; +} + +// The extensions writes an encoded ExtensionHandlerResponse to stdout. +message ExtensionHandlerResponse { + + // true if the extension is handled by the extension handler; false otherwise + bool handled = 1; + + // Error message. If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. + repeated string error = 2; + + // text output + google.protobuf.Any value = 3; +} + +message Wrapper { + // version of the OpenAPI specification in which this extension was written. + string version = 1; + + // Name of the extension + string extension_name = 2; + + // Must be a valid yaml for the proto + string yaml = 3; +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/googleapis/gnostic/extensions/extensions.go new file mode 100644 index 00000000000..94a8e62a776 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extensions.go @@ -0,0 +1,82 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openapiextension_v1 + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" +) + +type documentHandler func(version string, extensionName string, document string) +type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) + +func forInputYamlFromOpenapic(handler documentHandler) { + data, err := ioutil.ReadAll(os.Stdin) + if err != nil { + fmt.Println("File error:", err.Error()) + os.Exit(1) + } + if len(data) == 0 { + fmt.Println("No input data.") + os.Exit(1) + } + request := &ExtensionHandlerRequest{} + err = proto.Unmarshal(data, request) + if err != nil { + fmt.Println("Input error:", err.Error()) + os.Exit(1) + } + handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml) +} + +// ProcessExtension calles the handler for a specified extension. 
+func ProcessExtension(handleExtension extensionHandler) { + response := &ExtensionHandlerResponse{} + forInputYamlFromOpenapic( + func(version string, extensionName string, yamlInput string) { + var newObject proto.Message + var err error + + handled, newObject, err := handleExtension(extensionName, yamlInput) + if !handled { + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + + // If we reach here, then the extension is handled + response.Handled = true + if err != nil { + response.Error = append(response.Error, err.Error()) + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + response.Value, err = ptypes.MarshalAny(newObject) + if err != nil { + response.Error = append(response.Error, err.Error()) + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + }) + + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) +} diff --git a/vendor/github.com/googleapis/gnostic/gnostic.go b/vendor/github.com/googleapis/gnostic/gnostic.go new file mode 100644 index 00000000000..d2670cdea91 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/gnostic.go @@ -0,0 +1,556 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate ./COMPILE-PROTOS.sh + +// Gnostic is a tool for building better REST APIs through knowledge. +// +// Gnostic reads declarative descriptions of REST APIs that conform +// to the OpenAPI Specification, reports errors, resolves internal +// dependencies, and puts the results in a binary form that can +// be used in any language that is supported by the Protocol Buffer +// tools. +// +// Gnostic models are validated and typed. This allows API tool +// developers to focus on their product and not worry about input +// validation and type checking. +// +// Gnostic calls plugins that implement a variety of API implementation +// and support features including generation of client and server +// support code. +package main + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/googleapis/gnostic/OpenAPIv2" + "github.com/googleapis/gnostic/OpenAPIv3" + "github.com/googleapis/gnostic/compiler" + "github.com/googleapis/gnostic/jsonwriter" + plugins "github.com/googleapis/gnostic/plugins" + "gopkg.in/yaml.v2" +) + +const ( // OpenAPI Version + openAPIvUnknown = 0 + openAPIv2 = 2 + openAPIv3 = 3 +) + +// Determine the version of an OpenAPI description read from JSON or YAML. 
+func getOpenAPIVersionFromInfo(info interface{}) int { + m, ok := compiler.UnpackMap(info) + if !ok { + return openAPIvUnknown + } + swagger, ok := compiler.MapValueForKey(m, "swagger").(string) + if ok && strings.HasPrefix(swagger, "2.0") { + return openAPIv2 + } + openapi, ok := compiler.MapValueForKey(m, "openapi").(string) + if ok && strings.HasPrefix(openapi, "3.0") { + return openAPIv3 + } + return openAPIvUnknown +} + +const ( + pluginPrefix = "gnostic-" + extensionPrefix = "gnostic-x-" +) + +type pluginCall struct { + Name string + Invocation string +} + +// Invokes a plugin. +func (p *pluginCall) perform(document proto.Message, openAPIVersion int, sourceName string) error { + if p.Name != "" { + request := &plugins.Request{} + + // Infer the name of the executable by adding the prefix. + executableName := pluginPrefix + p.Name + + // Validate invocation string with regular expression. + invocation := p.Invocation + + // + // Plugin invocations must consist of + // zero or more comma-separated key=value pairs followed by a path. + // If pairs are present, a colon separates them from the path. + // Keys and values must be alphanumeric strings and may contain + // dashes, underscores, periods, or forward slashes. + // A path can contain any characters other than the separators ',', ':', and '='. + // + invocationRegex := regexp.MustCompile(`^([\w-_\/\.]+=[\w-_\/\.]+(,[\w-_\/\.]+=[\w-_\/\.]+)*:)?[^,:=]+$`) + if !invocationRegex.Match([]byte(p.Invocation)) { + return fmt.Errorf("Invalid invocation of %s: %s", executableName, invocation) + } + + invocationParts := strings.Split(p.Invocation, ":") + var outputLocation string + switch len(invocationParts) { + case 1: + outputLocation = invocationParts[0] + case 2: + parameters := strings.Split(invocationParts[0], ",") + for _, keyvalue := range parameters { + pair := strings.Split(keyvalue, "=") + if len(pair) == 2 { + request.Parameters = append(request.Parameters, &plugins.Parameter{Name: pair[0], Value: pair[1]}) + } + } + outputLocation = invocationParts[1] + default: + // badly-formed request + outputLocation = invocationParts[len(invocationParts)-1] + } + + version := &plugins.Version{} + version.Major = 0 + version.Minor = 1 + version.Patch = 0 + request.CompilerVersion = version + + request.OutputPath = outputLocation + + wrapper := &plugins.Wrapper{} + wrapper.Name = sourceName + switch openAPIVersion { + case openAPIv2: + wrapper.Version = "v2" + case openAPIv3: + wrapper.Version = "v3" + default: + wrapper.Version = "unknown" + } + protoBytes, _ := proto.Marshal(document) + wrapper.Value = protoBytes + request.Wrapper = wrapper + requestBytes, _ := proto.Marshal(request) + + cmd := exec.Command(executableName) + cmd.Stdin = bytes.NewReader(requestBytes) + cmd.Stderr = os.Stderr + output, err := cmd.Output() + if err != nil { + return err + } + response := &plugins.Response{} + err = proto.Unmarshal(output, response) + if err != nil { + return err + } + + if response.Errors != nil { + return fmt.Errorf("Plugin error: %+v", response.Errors) + } + + // Write files to the specified directory. + var writer io.Writer + switch { + case outputLocation == "!": + // Write nothing. 
+ case outputLocation == "-": + writer = os.Stdout + for _, file := range response.Files { + writer.Write([]byte("\n\n" + file.Name + " -------------------- \n")) + writer.Write(file.Data) + } + case isFile(outputLocation): + return fmt.Errorf("unable to overwrite %s", outputLocation) + default: // write files into a directory named by outputLocation + if !isDirectory(outputLocation) { + os.Mkdir(outputLocation, 0755) + } + for _, file := range response.Files { + p := outputLocation + "/" + file.Name + dir := path.Dir(p) + os.MkdirAll(dir, 0755) + f, _ := os.Create(p) + defer f.Close() + f.Write(file.Data) + } + } + } + return nil +} + +func isFile(path string) bool { + fileInfo, err := os.Stat(path) + if err != nil { + return false + } + return !fileInfo.IsDir() +} + +func isDirectory(path string) bool { + fileInfo, err := os.Stat(path) + if err != nil { + return false + } + return fileInfo.IsDir() +} + +// Write bytes to a named file. +// Certain names have special meaning: +// ! writes nothing +// - writes to stdout +// = writes to stderr +// If a directory name is given, the file is written there with +// a name derived from the source and extension arguments. +func writeFile(name string, bytes []byte, source string, extension string) { + var writer io.Writer + if name == "!" { + return + } else if name == "-" { + writer = os.Stdout + } else if name == "=" { + writer = os.Stderr + } else if isDirectory(name) { + base := filepath.Base(source) + // Remove the original source extension. + base = base[0 : len(base)-len(filepath.Ext(base))] + // Build the path that puts the result in the passed-in directory. + filename := name + "/" + base + "." + extension + file, _ := os.Create(filename) + defer file.Close() + writer = file + } else { + file, _ := os.Create(name) + defer file.Close() + writer = file + } + writer.Write(bytes) + if name == "-" || name == "=" { + writer.Write([]byte("\n")) + } +} + +// The Gnostic structure holds global state information for gnostic. +type Gnostic struct { + usage string + sourceName string + binaryOutputPath string + textOutputPath string + yamlOutputPath string + jsonOutputPath string + errorOutputPath string + resolveReferences bool + pluginCalls []*pluginCall + extensionHandlers []compiler.ExtensionHandler + openAPIVersion int +} + +// Initialize a structure to store global application state. +func newGnostic() *Gnostic { + g := &Gnostic{} + // Option fields initialize to their default values. + g.usage = ` +Usage: gnostic OPENAPI_SOURCE [OPTIONS] + OPENAPI_SOURCE is the filename or URL of an OpenAPI description to read. +Options: + --pb-out=PATH Write a binary proto to the specified location. + --text-out=PATH Write a text proto to the specified location. + --json-out=PATH Write a json API description to the specified location. + --yaml-out=PATH Write a yaml API description to the specified location. + --errors-out=PATH Write compilation errors to the specified location. + --PLUGIN-out=PATH Run the plugin named gnostic_PLUGIN and write results + to the specified location. + --x-EXTENSION Use the extension named gnostic-x-EXTENSION + to process OpenAPI specification extensions. + --resolve-refs Explicitly resolve $ref references. + This could have problems with recursive definitions. +` + // Initialize internal structures. + g.pluginCalls = make([]*pluginCall, 0) + g.extensionHandlers = make([]compiler.ExtensionHandler, 0) + return g +} + +// Parse command-line options. 
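+// The reserved names pb, text, json, yaml, and errors set the corresponding
+// built-in output paths; any other --NAME-out=PATH (or --NAME_out=PATH)
+// argument is recorded as a call to the plugin gnostic-NAME, and --x-NAME
+// registers an extension handler named gnostic-x-NAME.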
+func (g *Gnostic) readOptions() { + // plugin processing matches patterns of the form "--PLUGIN-out=PATH" and "--PLUGIN_out=PATH" + pluginRegex := regexp.MustCompile("--(.+)[-_]out=(.+)") + + // extension processing matches patterns of the form "--x-EXTENSION" + extensionRegex := regexp.MustCompile("--x-(.+)") + + for i, arg := range os.Args { + if i == 0 { + continue // skip the tool name + } + var m [][]byte + if m = pluginRegex.FindSubmatch([]byte(arg)); m != nil { + pluginName := string(m[1]) + invocation := string(m[2]) + switch pluginName { + case "pb": + g.binaryOutputPath = invocation + case "text": + g.textOutputPath = invocation + case "json": + g.jsonOutputPath = invocation + case "yaml": + g.yamlOutputPath = invocation + case "errors": + g.errorOutputPath = invocation + default: + p := &pluginCall{Name: pluginName, Invocation: invocation} + g.pluginCalls = append(g.pluginCalls, p) + } + } else if m = extensionRegex.FindSubmatch([]byte(arg)); m != nil { + extensionName := string(m[1]) + extensionHandler := compiler.ExtensionHandler{Name: extensionPrefix + extensionName} + g.extensionHandlers = append(g.extensionHandlers, extensionHandler) + } else if arg == "--resolve-refs" { + g.resolveReferences = true + } else if arg[0] == '-' { + fmt.Fprintf(os.Stderr, "Unknown option: %s.\n%s\n", arg, g.usage) + os.Exit(-1) + } else { + g.sourceName = arg + } + } +} + +// Validate command-line options. +func (g *Gnostic) validateOptions() { + if g.binaryOutputPath == "" && + g.textOutputPath == "" && + g.yamlOutputPath == "" && + g.jsonOutputPath == "" && + g.errorOutputPath == "" && + len(g.pluginCalls) == 0 { + fmt.Fprintf(os.Stderr, "Missing output directives.\n%s\n", g.usage) + os.Exit(-1) + } + if g.sourceName == "" { + fmt.Fprintf(os.Stderr, "No input specified.\n%s\n", g.usage) + os.Exit(-1) + } + // If we get here and the error output is unspecified, write errors to stderr. + if g.errorOutputPath == "" { + g.errorOutputPath = "=" + } +} + +// Generate an error message to be written to stderr or a file. +func (g *Gnostic) errorBytes(err error) []byte { + return []byte("Errors reading " + g.sourceName + "\n" + err.Error()) +} + +// Read an OpenAPI description from YAML or JSON. +func (g *Gnostic) readOpenAPIText(bytes []byte) (message proto.Message, err error) { + info, err := compiler.ReadInfoFromBytes(g.sourceName, bytes) + if err != nil { + return nil, err + } + // Determine the OpenAPI version. + g.openAPIVersion = getOpenAPIVersionFromInfo(info) + if g.openAPIVersion == openAPIvUnknown { + return nil, errors.New("unable to identify OpenAPI version") + } + // Compile to the proto model. + if g.openAPIVersion == openAPIv2 { + document, err := openapi_v2.NewDocument(info, compiler.NewContextWithExtensions("$root", nil, &g.extensionHandlers)) + if err != nil { + return nil, err + } + message = document + } else if g.openAPIVersion == openAPIv3 { + document, err := openapi_v3.NewDocument(info, compiler.NewContextWithExtensions("$root", nil, &g.extensionHandlers)) + if err != nil { + return nil, err + } + message = document + } + return message, err +} + +// Read an OpenAPI binary file. 
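+// The data is unmarshaled first as an OpenAPI v3 document and, if that does
+// not yield a document whose Openapi field starts with "3.0", as a v2
+// document; the detected version is recorded in g.openAPIVersion.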
+func (g *Gnostic) readOpenAPIBinary(data []byte) (message proto.Message, err error) { + // try to read an OpenAPI v3 document + documentV3 := &openapi_v3.Document{} + err = proto.Unmarshal(data, documentV3) + if err == nil && strings.HasPrefix(documentV3.Openapi, "3.0") { + g.openAPIVersion = openAPIv3 + return documentV3, nil + } + // if that failed, try to read an OpenAPI v2 document + documentV2 := &openapi_v2.Document{} + err = proto.Unmarshal(data, documentV2) + if err == nil && strings.HasPrefix(documentV2.Swagger, "2.0") { + g.openAPIVersion = openAPIv2 + return documentV2, nil + } + return nil, err +} + +// Write a binary pb representation. +func (g *Gnostic) writeBinaryOutput(message proto.Message) { + protoBytes, err := proto.Marshal(message) + if err != nil { + writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") + defer os.Exit(-1) + } else { + writeFile(g.binaryOutputPath, protoBytes, g.sourceName, "pb") + } +} + +// Write a text pb representation. +func (g *Gnostic) writeTextOutput(message proto.Message) { + bytes := []byte(proto.MarshalTextString(message)) + writeFile(g.textOutputPath, bytes, g.sourceName, "text") +} + +// Write JSON/YAML OpenAPI representations. +func (g *Gnostic) writeJSONYAMLOutput(message proto.Message) { + // Convert the OpenAPI document into an exportable MapSlice. + var rawInfo yaml.MapSlice + var ok bool + var err error + if g.openAPIVersion == openAPIv2 { + document := message.(*openapi_v2.Document) + rawInfo, ok = document.ToRawInfo().(yaml.MapSlice) + if !ok { + rawInfo = nil + } + } else if g.openAPIVersion == openAPIv3 { + document := message.(*openapi_v3.Document) + rawInfo, ok = document.ToRawInfo().(yaml.MapSlice) + if !ok { + rawInfo = nil + } + } + // Optionally write description in yaml format. + if g.yamlOutputPath != "" { + var bytes []byte + if rawInfo != nil { + bytes, err = yaml.Marshal(rawInfo) + if err != nil { + fmt.Fprintf(os.Stderr, "Error generating yaml output %s\n", err.Error()) + } + writeFile(g.yamlOutputPath, bytes, g.sourceName, "yaml") + } else { + fmt.Fprintf(os.Stderr, "No yaml output available.\n") + } + } + // Optionally write description in json format. + if g.jsonOutputPath != "" { + var bytes []byte + if rawInfo != nil { + bytes, _ = jsonwriter.Marshal(rawInfo) + if err != nil { + fmt.Fprintf(os.Stderr, "Error generating json output %s\n", err.Error()) + } + writeFile(g.jsonOutputPath, bytes, g.sourceName, "json") + } else { + fmt.Fprintf(os.Stderr, "No json output available.\n") + } + } +} + +// Perform all actions specified in the command-line options. +func (g *Gnostic) performActions(message proto.Message) (err error) { + // Optionally resolve internal references. + if g.resolveReferences { + if g.openAPIVersion == openAPIv2 { + document := message.(*openapi_v2.Document) + _, err = document.ResolveReferences(g.sourceName) + } else if g.openAPIVersion == openAPIv3 { + document := message.(*openapi_v3.Document) + _, err = document.ResolveReferences(g.sourceName) + } + if err != nil { + return err + } + } + // Optionally write proto in binary format. + if g.binaryOutputPath != "" { + g.writeBinaryOutput(message) + } + // Optionally write proto in text format. + if g.textOutputPath != "" { + g.writeTextOutput(message) + } + // Optionaly write document in yaml and/or json formats. + if g.yamlOutputPath != "" || g.jsonOutputPath != "" { + g.writeJSONYAMLOutput(message) + } + // Call all specified plugins. 
+ for _, p := range g.pluginCalls { + err := p.perform(message, g.openAPIVersion, g.sourceName) + if err != nil { + writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") + defer os.Exit(-1) // run all plugins, even when some have errors + } + } + return nil +} + +func (g *Gnostic) main() { + var err error + g.readOptions() + g.validateOptions() + // Read the OpenAPI source. + bytes, err := compiler.ReadBytesForFile(g.sourceName) + if err != nil { + writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") + os.Exit(-1) + } + extension := strings.ToLower(filepath.Ext(g.sourceName)) + var message proto.Message + if extension == ".json" || extension == ".yaml" { + // Try to read the source as JSON/YAML. + message, err = g.readOpenAPIText(bytes) + if err != nil { + writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") + os.Exit(-1) + } + } else if extension == ".pb" { + // Try to read the source as a binary protocol buffer. + message, err = g.readOpenAPIBinary(bytes) + if err != nil { + writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") + os.Exit(-1) + } + } else { + err = errors.New("unknown file extension. 'json', 'yaml', and 'pb' are accepted") + writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") + os.Exit(-1) + } + // Perform actions specified by command options. + err = g.performActions(message) + if err != nil { + writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") + os.Exit(-1) + } +} + +func main() { + g := newGnostic() + g.main() +} diff --git a/vendor/github.com/googleapis/gnostic/gnostic_test.go b/vendor/github.com/googleapis/gnostic/gnostic_test.go new file mode 100644 index 00000000000..c5ccf6da984 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/gnostic_test.go @@ -0,0 +1,453 @@ +package main + +import ( + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" +) + +func testCompiler(t *testing.T, inputFile string, referenceFile string, expectErrors bool) { + textFile := strings.Replace(filepath.Base(inputFile), filepath.Ext(inputFile), ".text", 1) + errorsFile := strings.Replace(filepath.Base(inputFile), filepath.Ext(inputFile), ".errors", 1) + // remove any preexisting output files + os.Remove(textFile) + os.Remove(errorsFile) + // run the compiler + var err error + var cmd = exec.Command( + "gnostic", + inputFile, + "--text-out=.", + "--errors-out=.", + "--resolve-refs") + //t.Log(cmd.Args) + err = cmd.Run() + if err != nil && !expectErrors { + t.Logf("Compile failed: %+v", err) + t.FailNow() + } + // verify the output against a reference + var outputFile string + if expectErrors { + outputFile = errorsFile + } else { + outputFile = textFile + } + err = exec.Command("diff", outputFile, referenceFile).Run() + if err != nil { + t.Logf("Diff failed: %+v", err) + t.FailNow() + } else { + // if the test succeeded, clean up + os.Remove(textFile) + os.Remove(errorsFile) + } +} + +func testNormal(t *testing.T, inputFile string, referenceFile string) { + testCompiler(t, inputFile, referenceFile, false) +} + +func testErrors(t *testing.T, inputFile string, referenceFile string) { + testCompiler(t, inputFile, referenceFile, true) +} + +func TestPetstoreJSON(t *testing.T) { + testNormal(t, + "examples/v2.0/json/petstore.json", + "test/v2.0/petstore.text") +} + +func TestPetstoreYAML(t *testing.T) { + testNormal(t, + "examples/v2.0/yaml/petstore.yaml", + "test/v2.0/petstore.text") +} + +func TestSeparateYAML(t *testing.T) { + testNormal(t, + 
"examples/v2.0/yaml/petstore-separate/spec/swagger.yaml", + "test/v2.0/yaml/petstore-separate/spec/swagger.text") +} + +func TestSeparateJSON(t *testing.T) { + testNormal(t, + "examples/v2.0/json/petstore-separate/spec/swagger.json", + "test/v2.0/yaml/petstore-separate/spec/swagger.text") // yaml and json results should be identical +} + +func TestRemotePetstoreJSON(t *testing.T) { + testNormal(t, + "https://raw.githubusercontent.com/googleapis/openapi-compiler/master/examples/v2.0/json/petstore.json", + "test/v2.0/petstore.text") +} + +func TestRemotePetstoreYAML(t *testing.T) { + testNormal(t, + "https://raw.githubusercontent.com/googleapis/openapi-compiler/master/examples/v2.0/yaml/petstore.yaml", + "test/v2.0/petstore.text") +} + +func TestRemoteSeparateYAML(t *testing.T) { + testNormal(t, + "https://raw.githubusercontent.com/googleapis/openapi-compiler/master/examples/v2.0/yaml/petstore-separate/spec/swagger.yaml", + "test/v2.0/yaml/petstore-separate/spec/swagger.text") +} + +func TestRemoteSeparateJSON(t *testing.T) { + testNormal(t, + "https://raw.githubusercontent.com/googleapis/openapi-compiler/master/examples/v2.0/json/petstore-separate/spec/swagger.json", + "test/v2.0/yaml/petstore-separate/spec/swagger.text") +} + +func TestErrorBadProperties(t *testing.T) { + testErrors(t, + "examples/errors/petstore-badproperties.yaml", + "test/errors/petstore-badproperties.errors") +} + +func TestErrorUnresolvedRefs(t *testing.T) { + testErrors(t, + "examples/errors/petstore-unresolvedrefs.yaml", + "test/errors/petstore-unresolvedrefs.errors") +} + +func TestErrorMissingVersion(t *testing.T) { + testErrors(t, + "examples/errors/petstore-missingversion.yaml", + "test/errors/petstore-missingversion.errors") +} + +func testPlugin(t *testing.T, plugin string, inputFile string, outputFile string, referenceFile string) { + // remove any preexisting output files + os.Remove(outputFile) + // run the compiler + var err error + output, err := exec.Command( + "gnostic", + "--"+plugin+"-out=-", + inputFile).Output() + if err != nil { + t.Logf("Compile failed: %+v", err) + t.FailNow() + } + _ = ioutil.WriteFile(outputFile, output, 0644) + err = exec.Command("diff", outputFile, referenceFile).Run() + if err != nil { + t.Logf("Diff failed: %+v", err) + t.FailNow() + } else { + // if the test succeeded, clean up + os.Remove(outputFile) + } +} + +func TestSamplePluginWithPetstore(t *testing.T) { + testPlugin(t, + "go-sample", + "examples/v2.0/yaml/petstore.yaml", + "sample-petstore.out", + "test/v2.0/yaml/sample-petstore.out") +} + +func TestErrorInvalidPluginInvocations(t *testing.T) { + var err error + output, err := exec.Command( + "gnostic", + "examples/v2.0/yaml/petstore.yaml", + "--errors-out=-", + "--plugin-out=foo=bar,:abc", + "--plugin-out=,foo=bar:abc", + "--plugin-out=foo=:abc", + "--plugin-out==bar:abc", + "--plugin-out=,,:abc", + "--plugin-out=foo=bar=baz:abc", + ).Output() + if err == nil { + t.Logf("Invalid invocations were accepted") + t.FailNow() + } + outputFile := "invalid-plugin-invocation.errors" + _ = ioutil.WriteFile(outputFile, output, 0644) + err = exec.Command("diff", outputFile, "test/errors/invalid-plugin-invocation.errors").Run() + if err != nil { + t.Logf("Diff failed: %+v", err) + t.FailNow() + } else { + // if the test succeeded, clean up + os.Remove(outputFile) + } +} + +func TestValidPluginInvocations(t *testing.T) { + var err error + output, err := exec.Command( + "gnostic", + "examples/v2.0/yaml/petstore.yaml", + "--errors-out=-", + // verify an invocation with no 
parameters + "--go-sample-out=!", // "!" indicates that no output should be generated + // verify single pair of parameters + "--go-sample-out=a=b:!", + // verify multiple parameters + "--go-sample-out=a=b,c=123,xyz=alphabetagammadelta:!", + // verify that special characters / . - _ can be included in parameter keys and values + "--go-sample-out=a/b/c=x/y/z:!", + "--go-sample-out=a.b.c=x.y.z:!", + "--go-sample-out=a-b-c=x-y-z:!", + "--go-sample-out=a_b_c=x_y_z:!", + ).Output() + if len(output) != 0 { + t.Logf("Valid invocations generated invalid errors\n%s", string(output)) + t.FailNow() + } + if err != nil { + t.Logf("Valid invocations were not accepted") + t.FailNow() + } +} + +func TestExtensionHandlerWithLibraryExample(t *testing.T) { + outputFile := "library-example-with-ext.text.out" + inputFile := "test/library-example-with-ext.json" + referenceFile := "test/library-example-with-ext.text.out" + + os.Remove(outputFile) + // run the compiler + var err error + + command := exec.Command( + "gnostic", + "--x-sampleone", + "--x-sampletwo", + "--text-out="+outputFile, + "--resolve-refs", + inputFile) + + _, err = command.Output() + if err != nil { + t.Logf("Compile failed for command %v: %+v", command, err) + t.FailNow() + } + //_ = ioutil.WriteFile(outputFile, output, 0644) + err = exec.Command("diff", outputFile, referenceFile).Run() + if err != nil { + t.Logf("Diff failed: %+v", err) + t.FailNow() + } else { + // if the test succeeded, clean up + os.Remove(outputFile) + } +} + +func TestJSONOutput(t *testing.T) { + inputFile := "test/library-example-with-ext.json" + + textFile := "sample.text" + jsonFile := "sample.json" + textFile2 := "sample2.text" + jsonFile2 := "sample2.json" + + os.Remove(textFile) + os.Remove(jsonFile) + os.Remove(textFile2) + os.Remove(jsonFile2) + + var err error + + // Run the compiler once. + command := exec.Command( + "gnostic", + "--text-out="+textFile, + "--json-out="+jsonFile, + inputFile) + _, err = command.Output() + if err != nil { + t.Logf("Compile failed for command %v: %+v", command, err) + t.FailNow() + } + + // Run the compiler again, this time on the generated output. + command = exec.Command( + "gnostic", + "--text-out="+textFile2, + "--json-out="+jsonFile2, + jsonFile) + _, err = command.Output() + if err != nil { + t.Logf("Compile failed for command %v: %+v", command, err) + t.FailNow() + } + + // Verify that both models have the same internal representation. + err = exec.Command("diff", textFile, textFile2).Run() + if err != nil { + t.Logf("Diff failed: %+v", err) + t.FailNow() + } else { + // if the test succeeded, clean up + os.Remove(textFile) + os.Remove(jsonFile) + os.Remove(textFile2) + os.Remove(jsonFile2) + } +} + +func TestYAMLOutput(t *testing.T) { + inputFile := "test/library-example-with-ext.json" + + textFile := "sample.text" + yamlFile := "sample.yaml" + textFile2 := "sample2.text" + yamlFile2 := "sample2.yaml" + + os.Remove(textFile) + os.Remove(yamlFile) + os.Remove(textFile2) + os.Remove(yamlFile2) + + var err error + + // Run the compiler once. + command := exec.Command( + "gnostic", + "--text-out="+textFile, + "--yaml-out="+yamlFile, + inputFile) + _, err = command.Output() + if err != nil { + t.Logf("Compile failed for command %v: %+v", command, err) + t.FailNow() + } + + // Run the compiler again, this time on the generated output. 
+ command = exec.Command( + "gnostic", + "--text-out="+textFile2, + "--yaml-out="+yamlFile2, + yamlFile) + _, err = command.Output() + if err != nil { + t.Logf("Compile failed for command %v: %+v", command, err) + t.FailNow() + } + + // Verify that both models have the same internal representation. + err = exec.Command("diff", textFile, textFile2).Run() + if err != nil { + t.Logf("Diff failed: %+v", err) + t.FailNow() + } else { + // if the test succeeded, clean up + os.Remove(textFile) + os.Remove(yamlFile) + os.Remove(textFile2) + os.Remove(yamlFile2) + } +} + +func testBuilder(version string, t *testing.T) { + var err error + + pbFile := "petstore-" + version + ".pb" + yamlFile := "petstore.yaml" + jsonFile := "petstore.json" + textFile := "petstore.text" + textReference := "test/" + version + ".0/petstore.text" + + os.Remove(pbFile) + os.Remove(textFile) + os.Remove(yamlFile) + os.Remove(jsonFile) + + // Generate petstore.pb. + command := exec.Command( + "petstore-builder", + "--"+version) + _, err = command.Output() + if err != nil { + t.Logf("Command %v failed: %+v", command, err) + t.FailNow() + } + + // Convert petstore.pb to yaml and json. + command = exec.Command( + "gnostic", + pbFile, + "--json-out="+jsonFile, + "--yaml-out="+yamlFile) + _, err = command.Output() + if err != nil { + t.Logf("Command %v failed: %+v", command, err) + t.FailNow() + } + + // Read petstore.yaml, resolve references, and export text. + command = exec.Command( + "gnostic", + yamlFile, + "--resolve-refs", + "--text-out="+textFile) + _, err = command.Output() + if err != nil { + t.Logf("Command %v failed: %+v", command, err) + t.FailNow() + } + + // Verify that the generated text matches our reference. + err = exec.Command("diff", textFile, textReference).Run() + if err != nil { + t.Logf("Diff failed: %+v", err) + t.FailNow() + } + + // Read petstore.json, resolve references, and export text. + command = exec.Command( + "gnostic", + jsonFile, + "--resolve-refs", + "--text-out="+textFile) + _, err = command.Output() + if err != nil { + t.Logf("Command %v failed: %+v", command, err) + t.FailNow() + } + + // Verify that the generated text matches our reference. + err = exec.Command("diff", textFile, textReference).Run() + if err != nil { + t.Logf("Diff failed: %+v", err) + t.FailNow() + } + + // if the test succeeded, clean up + os.Remove(pbFile) + os.Remove(textFile) + os.Remove(yamlFile) + os.Remove(jsonFile) +} + +func TestBuilderV2(t *testing.T) { + testBuilder("v2", t) +} + +func TestBuilderV3(t *testing.T) { + testBuilder("v3", t) +} + +// OpenAPI 3.0 tests + +func TestPetstoreYAML_30(t *testing.T) { + testNormal(t, + "examples/v3.0/yaml/petstore.yaml", + "test/v3.0/petstore.text") +} + +func TestPetstoreJSON_30(t *testing.T) { + testNormal(t, + "examples/v3.0/json/petstore.json", + "test/v3.0/petstore.text") +} diff --git a/vendor/github.com/gregjones/httpcache/.travis.yml b/vendor/github.com/gregjones/httpcache/.travis.yml new file mode 100644 index 00000000000..2bca4c599fd --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/.travis.yml @@ -0,0 +1,18 @@ +sudo: false +language: go +go: + - 1.6.x + - 1.7.x + - 1.8.x + - master +matrix: + allow_failures: + - go: master + fast_finish: true +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - go tool vet . + - go test -v -race ./... 
diff --git a/vendor/github.com/gregjones/httpcache/LICENSE.txt b/vendor/github.com/gregjones/httpcache/LICENSE.txt new file mode 100644 index 00000000000..81316beb0cb --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/LICENSE.txt @@ -0,0 +1,7 @@ +Copyright © 2012 Greg Jones (greg.jones@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md new file mode 100644 index 00000000000..61bd830e57c --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/README.md @@ -0,0 +1,24 @@ +httpcache +========= + +[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache) + +Package httpcache provides a http.RoundTripper implementation that works as a mostly RFC-compliant cache for http responses. + +It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy). + +Cache Backends +-------------- + +- The built-in 'memory' cache stores responses in an in-memory map. +- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library. +- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers. +- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage. +- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb). +- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries. +- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache. 
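+
+A minimal usage sketch (the URL below is only a placeholder): wrap an
+`http.Client` with the in-memory cache transport and check the `X-From-Cache`
+header on a repeated request.
+
+```go
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+
+	"github.com/gregjones/httpcache"
+)
+
+func main() {
+	// Client returns an *http.Client whose Transport serves cacheable
+	// responses from an in-memory map.
+	client := httpcache.NewMemoryCacheTransport().Client()
+
+	fetch := func(url string) {
+		resp, err := client.Get(url)
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer resp.Body.Close()
+		// The body must be read to EOF before the response is stored.
+		_, _ = ioutil.ReadAll(resp.Body)
+		fmt.Println(url, "from cache:", resp.Header.Get(httpcache.XFromCache) == "1")
+	}
+
+	fetch("https://example.com/") // the first request always goes to the network
+	// A repeat request may be served from the cache, depending on the
+	// response's Cache-Control and Expires headers.
+	fetch("https://example.com/")
+}
+```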
+ +License +------- + +- [MIT License](LICENSE.txt) diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go new file mode 100644 index 00000000000..42e3129d823 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go @@ -0,0 +1,61 @@ +// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package +// to supplement an in-memory map with persistent storage +// +package diskcache + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "github.com/peterbourgon/diskv" + "io" +) + +// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage +type Cache struct { + d *diskv.Diskv +} + +// Get returns the response corresponding to key if present +func (c *Cache) Get(key string) (resp []byte, ok bool) { + key = keyToFilename(key) + resp, err := c.d.Read(key) + if err != nil { + return []byte{}, false + } + return resp, true +} + +// Set saves a response to the cache as key +func (c *Cache) Set(key string, resp []byte) { + key = keyToFilename(key) + c.d.WriteStream(key, bytes.NewReader(resp), true) +} + +// Delete removes the response with key from the cache +func (c *Cache) Delete(key string) { + key = keyToFilename(key) + c.d.Erase(key) +} + +func keyToFilename(key string) string { + h := md5.New() + io.WriteString(h, key) + return hex.EncodeToString(h.Sum(nil)) +} + +// New returns a new Cache that will store files in basePath +func New(basePath string) *Cache { + return &Cache{ + d: diskv.New(diskv.Options{ + BasePath: basePath, + CacheSizeMax: 100 * 1024 * 1024, // 100MB + }), + } +} + +// NewWithDiskv returns a new Cache using the provided Diskv as underlying +// storage. +func NewWithDiskv(d *diskv.Diskv) *Cache { + return &Cache{d} +} diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache_test.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache_test.go new file mode 100644 index 00000000000..35c76cbd1cd --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/diskcache/diskcache_test.go @@ -0,0 +1,42 @@ +package diskcache + +import ( + "bytes" + "io/ioutil" + "os" + "testing" +) + +func TestDiskCache(t *testing.T) { + tempDir, err := ioutil.TempDir("", "httpcache") + if err != nil { + t.Fatalf("TempDir: %v", err) + } + defer os.RemoveAll(tempDir) + + cache := New(tempDir) + + key := "testKey" + _, ok := cache.Get(key) + if ok { + t.Fatal("retrieved key before adding it") + } + + val := []byte("some bytes") + cache.Set(key, val) + + retVal, ok := cache.Get(key) + if !ok { + t.Fatal("could not retrieve an element we just added") + } + if !bytes.Equal(retVal, val) { + t.Fatal("retrieved a different value than what we put in") + } + + cache.Delete(key) + + _, ok = cache.Get(key) + if ok { + t.Fatal("deleted key still present") + } +} diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go new file mode 100644 index 00000000000..8239edc2cb2 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/httpcache.go @@ -0,0 +1,553 @@ +// Package httpcache provides a http.RoundTripper implementation that works as a +// mostly RFC-compliant cache for http responses. +// +// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client +// and not for a shared proxy). 
+// +package httpcache + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httputil" + "strings" + "sync" + "time" +) + +const ( + stale = iota + fresh + transparent + // XFromCache is the header added to responses that are returned from the cache + XFromCache = "X-From-Cache" +) + +// A Cache interface is used by the Transport to store and retrieve responses. +type Cache interface { + // Get returns the []byte representation of a cached response and a bool + // set to true if the value isn't empty + Get(key string) (responseBytes []byte, ok bool) + // Set stores the []byte representation of a response against a key + Set(key string, responseBytes []byte) + // Delete removes the value associated with the key + Delete(key string) +} + +// cacheKey returns the cache key for req. +func cacheKey(req *http.Request) string { + return req.URL.String() +} + +// CachedResponse returns the cached http.Response for req if present, and nil +// otherwise. +func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) { + cachedVal, ok := c.Get(cacheKey(req)) + if !ok { + return + } + + b := bytes.NewBuffer(cachedVal) + return http.ReadResponse(bufio.NewReader(b), req) +} + +// MemoryCache is an implemtation of Cache that stores responses in an in-memory map. +type MemoryCache struct { + mu sync.RWMutex + items map[string][]byte +} + +// Get returns the []byte representation of the response and true if present, false if not +func (c *MemoryCache) Get(key string) (resp []byte, ok bool) { + c.mu.RLock() + resp, ok = c.items[key] + c.mu.RUnlock() + return resp, ok +} + +// Set saves response resp to the cache with key +func (c *MemoryCache) Set(key string, resp []byte) { + c.mu.Lock() + c.items[key] = resp + c.mu.Unlock() +} + +// Delete removes key from the cache +func (c *MemoryCache) Delete(key string) { + c.mu.Lock() + delete(c.items, key) + c.mu.Unlock() +} + +// NewMemoryCache returns a new Cache that will store items in an in-memory map +func NewMemoryCache() *MemoryCache { + c := &MemoryCache{items: map[string][]byte{}} + return c +} + +// Transport is an implementation of http.RoundTripper that will return values from a cache +// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since) +// to repeated requests allowing servers to return 304 / Not Modified +type Transport struct { + // The RoundTripper interface actually used to make requests + // If nil, http.DefaultTransport is used + Transport http.RoundTripper + Cache Cache + // If true, responses returned from the cache will be given an extra header, X-From-Cache + MarkCachedResponses bool +} + +// NewTransport returns a new Transport with the +// provided Cache implementation and MarkCachedResponses set to true +func NewTransport(c Cache) *Transport { + return &Transport{Cache: c, MarkCachedResponses: true} +} + +// Client returns an *http.Client that caches responses. 
+func (t *Transport) Client() *http.Client { + return &http.Client{Transport: t} +} + +// varyMatches will return false unless all of the cached values for the headers listed in Vary +// match the new request +func varyMatches(cachedResp *http.Response, req *http.Request) bool { + for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") { + header = http.CanonicalHeaderKey(header) + if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) { + return false + } + } + return true +} + +// RoundTrip takes a Request and returns a Response +// +// If there is a fresh Response already in cache, then it will be returned without connecting to +// the server. +// +// If there is a stale Response, then any validators it contains will be set on the new request +// to give the server a chance to respond with NotModified. If this happens, then the cached Response +// will be returned. +func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + cacheKey := cacheKey(req) + cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == "" + var cachedResp *http.Response + if cacheable { + cachedResp, err = CachedResponse(t.Cache, req) + } else { + // Need to invalidate an existing value + t.Cache.Delete(cacheKey) + } + + transport := t.Transport + if transport == nil { + transport = http.DefaultTransport + } + + if cacheable && cachedResp != nil && err == nil { + if t.MarkCachedResponses { + cachedResp.Header.Set(XFromCache, "1") + } + + if varyMatches(cachedResp, req) { + // Can only use cached value if the new request doesn't Vary significantly + freshness := getFreshness(cachedResp.Header, req.Header) + if freshness == fresh { + return cachedResp, nil + } + + if freshness == stale { + var req2 *http.Request + // Add validators if caller hasn't already done so + etag := cachedResp.Header.Get("etag") + if etag != "" && req.Header.Get("etag") == "" { + req2 = cloneRequest(req) + req2.Header.Set("if-none-match", etag) + } + lastModified := cachedResp.Header.Get("last-modified") + if lastModified != "" && req.Header.Get("last-modified") == "" { + if req2 == nil { + req2 = cloneRequest(req) + } + req2.Header.Set("if-modified-since", lastModified) + } + if req2 != nil { + req = req2 + } + } + } + + resp, err = transport.RoundTrip(req) + if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified { + // Replace the 304 response with the one from cache, but update with some new headers + endToEndHeaders := getEndToEndHeaders(resp.Header) + for _, header := range endToEndHeaders { + cachedResp.Header[header] = resp.Header[header] + } + cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK)) + cachedResp.StatusCode = http.StatusOK + + resp = cachedResp + } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) && + req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) { + // In case of transport failure and stale-if-error activated, returns cached content + // when available + cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK)) + cachedResp.StatusCode = http.StatusOK + return cachedResp, nil + } else { + if err != nil || resp.StatusCode != http.StatusOK { + t.Cache.Delete(cacheKey) + } + if err != nil { + return nil, err + } + } + } else { + reqCacheControl := parseCacheControl(req.Header) + if _, ok := reqCacheControl["only-if-cached"]; ok { + resp = newGatewayTimeoutResponse(req) + } else { + 
resp, err = transport.RoundTrip(req) + if err != nil { + return nil, err + } + } + } + + if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) { + for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") { + varyKey = http.CanonicalHeaderKey(varyKey) + fakeHeader := "X-Varied-" + varyKey + reqValue := req.Header.Get(varyKey) + if reqValue != "" { + resp.Header.Set(fakeHeader, reqValue) + } + } + switch req.Method { + case "GET": + // Delay caching until EOF is reached. + resp.Body = &cachingReadCloser{ + R: resp.Body, + OnEOF: func(r io.Reader) { + resp := *resp + resp.Body = ioutil.NopCloser(r) + respBytes, err := httputil.DumpResponse(&resp, true) + if err == nil { + t.Cache.Set(cacheKey, respBytes) + } + }, + } + default: + respBytes, err := httputil.DumpResponse(resp, true) + if err == nil { + t.Cache.Set(cacheKey, respBytes) + } + } + } else { + t.Cache.Delete(cacheKey) + } + return resp, nil +} + +// ErrNoDateHeader indicates that the HTTP headers contained no Date header. +var ErrNoDateHeader = errors.New("no Date header") + +// Date parses and returns the value of the Date header. +func Date(respHeaders http.Header) (date time.Time, err error) { + dateHeader := respHeaders.Get("date") + if dateHeader == "" { + err = ErrNoDateHeader + return + } + + return time.Parse(time.RFC1123, dateHeader) +} + +type realClock struct{} + +func (c *realClock) since(d time.Time) time.Duration { + return time.Since(d) +} + +type timer interface { + since(d time.Time) time.Duration +} + +var clock timer = &realClock{} + +// getFreshness will return one of fresh/stale/transparent based on the cache-control +// values of the request and the response +// +// fresh indicates the response can be returned +// stale indicates that the response needs validating before it is returned +// transparent indicates the response should not be used to fulfil the request +// +// Because this is only a private cache, 'public' and 'private' in cache-control aren't +// signficant. Similarly, smax-age isn't used. +func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) { + respCacheControl := parseCacheControl(respHeaders) + reqCacheControl := parseCacheControl(reqHeaders) + if _, ok := reqCacheControl["no-cache"]; ok { + return transparent + } + if _, ok := respCacheControl["no-cache"]; ok { + return stale + } + if _, ok := reqCacheControl["only-if-cached"]; ok { + return fresh + } + + date, err := Date(respHeaders) + if err != nil { + return stale + } + currentAge := clock.since(date) + + var lifetime time.Duration + var zeroDuration time.Duration + + // If a response includes both an Expires header and a max-age directive, + // the max-age directive overrides the Expires header, even if the Expires header is more restrictive. 
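+	// For example, with "Cache-Control: max-age=60" on a response whose Date is
+	// 90 seconds old, the lifetime (60s) is below the current age, so the
+	// response is stale; "max-age=120" would still be fresh (absent request
+	// directives such as min-fresh or max-stale, which are applied below).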
+ if maxAge, ok := respCacheControl["max-age"]; ok { + lifetime, err = time.ParseDuration(maxAge + "s") + if err != nil { + lifetime = zeroDuration + } + } else { + expiresHeader := respHeaders.Get("Expires") + if expiresHeader != "" { + expires, err := time.Parse(time.RFC1123, expiresHeader) + if err != nil { + lifetime = zeroDuration + } else { + lifetime = expires.Sub(date) + } + } + } + + if maxAge, ok := reqCacheControl["max-age"]; ok { + // the client is willing to accept a response whose age is no greater than the specified time in seconds + lifetime, err = time.ParseDuration(maxAge + "s") + if err != nil { + lifetime = zeroDuration + } + } + if minfresh, ok := reqCacheControl["min-fresh"]; ok { + // the client wants a response that will still be fresh for at least the specified number of seconds. + minfreshDuration, err := time.ParseDuration(minfresh + "s") + if err == nil { + currentAge = time.Duration(currentAge + minfreshDuration) + } + } + + if maxstale, ok := reqCacheControl["max-stale"]; ok { + // Indicates that the client is willing to accept a response that has exceeded its expiration time. + // If max-stale is assigned a value, then the client is willing to accept a response that has exceeded + // its expiration time by no more than the specified number of seconds. + // If no value is assigned to max-stale, then the client is willing to accept a stale response of any age. + // + // Responses served only because of a max-stale value are supposed to have a Warning header added to them, + // but that seems like a hassle, and is it actually useful? If so, then there needs to be a different + // return-value available here. + if maxstale == "" { + return fresh + } + maxstaleDuration, err := time.ParseDuration(maxstale + "s") + if err == nil { + currentAge = time.Duration(currentAge - maxstaleDuration) + } + } + + if lifetime > currentAge { + return fresh + } + + return stale +} + +// Returns true if either the request or the response includes the stale-if-error +// cache control extension: https://tools.ietf.org/html/rfc5861 +func canStaleOnError(respHeaders, reqHeaders http.Header) bool { + respCacheControl := parseCacheControl(respHeaders) + reqCacheControl := parseCacheControl(reqHeaders) + + var err error + lifetime := time.Duration(-1) + + if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + + if lifetime >= 0 { + date, err := Date(respHeaders) + if err != nil { + return false + } + currentAge := clock.since(date) + if lifetime > currentAge { + return true + } + } + + return false +} + +func getEndToEndHeaders(respHeaders http.Header) []string { + // These headers are always hop-by-hop + hopByHopHeaders := map[string]struct{}{ + "Connection": struct{}{}, + "Keep-Alive": struct{}{}, + "Proxy-Authenticate": struct{}{}, + "Proxy-Authorization": struct{}{}, + "Te": struct{}{}, + "Trailers": struct{}{}, + "Transfer-Encoding": struct{}{}, + "Upgrade": struct{}{}, + } + + for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { + // any header listed in connection, if present, is also considered hop-by-hop + if strings.Trim(extra, " ") != "" { + hopByHopHeaders[http.CanonicalHeaderKey(extra)] 
= struct{}{} + } + } + endToEndHeaders := []string{} + for respHeader, _ := range respHeaders { + if _, ok := hopByHopHeaders[respHeader]; !ok { + endToEndHeaders = append(endToEndHeaders, respHeader) + } + } + return endToEndHeaders +} + +func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) { + if _, ok := respCacheControl["no-store"]; ok { + return false + } + if _, ok := reqCacheControl["no-store"]; ok { + return false + } + return true +} + +func newGatewayTimeoutResponse(req *http.Request) *http.Response { + var braw bytes.Buffer + braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n") + resp, err := http.ReadResponse(bufio.NewReader(&braw), req) + if err != nil { + panic(err) + } + return resp +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +// (This function copyright goauth2 authors: https://code.google.com/p/goauth2) +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +type cacheControl map[string]string + +func parseCacheControl(headers http.Header) cacheControl { + cc := cacheControl{} + ccHeader := headers.Get("Cache-Control") + for _, part := range strings.Split(ccHeader, ",") { + part = strings.Trim(part, " ") + if part == "" { + continue + } + if strings.ContainsRune(part, '=') { + keyval := strings.Split(part, "=") + cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") + } else { + cc[part] = "" + } + } + return cc +} + +// headerAllCommaSepValues returns all comma-separated values (each +// with whitespace trimmed) for header name in headers. According to +// Section 4.2 of the HTTP/1.1 spec +// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2), +// values from multiple occurrences of a header should be concatenated, if +// the header's value is a comma-separated list. +func headerAllCommaSepValues(headers http.Header, name string) []string { + var vals []string + for _, val := range headers[http.CanonicalHeaderKey(name)] { + fields := strings.Split(val, ",") + for i, f := range fields { + fields[i] = strings.TrimSpace(f) + } + vals = append(vals, fields...) + } + return vals +} + +// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF +// handler with a full copy of the content read from R when EOF is +// reached. +type cachingReadCloser struct { + // Underlying ReadCloser. + R io.ReadCloser + // OnEOF is called with a copy of the content of R when EOF is reached. + OnEOF func(io.Reader) + + buf bytes.Buffer // buf stores a copy of the content of R. +} + +// Read reads the next len(p) bytes from R or until R is drained. The +// return value n is the number of bytes read. If R has no data to +// return, err is io.EOF and OnEOF is called with a full copy of what +// has been read so far. 
+func (r *cachingReadCloser) Read(p []byte) (n int, err error) { + n, err = r.R.Read(p) + r.buf.Write(p[:n]) + if err == io.EOF { + r.OnEOF(bytes.NewReader(r.buf.Bytes())) + } + return n, err +} + +func (r *cachingReadCloser) Close() error { + return r.R.Close() +} + +// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation +func NewMemoryCacheTransport() *Transport { + c := NewMemoryCache() + t := NewTransport(c) + return t +} diff --git a/vendor/github.com/gregjones/httpcache/httpcache_test.go b/vendor/github.com/gregjones/httpcache/httpcache_test.go new file mode 100644 index 00000000000..c2eab023108 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/httpcache_test.go @@ -0,0 +1,1360 @@ +package httpcache + +import ( + "bytes" + "errors" + "flag" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "strconv" + "testing" + "time" +) + +var s struct { + server *httptest.Server + client http.Client + transport *Transport + done chan struct{} // Closed to unlock infinite handlers. +} + +type fakeClock struct { + elapsed time.Duration +} + +func (c *fakeClock) since(t time.Time) time.Duration { + return c.elapsed +} + +func TestMain(m *testing.M) { + flag.Parse() + setup() + code := m.Run() + teardown() + os.Exit(code) +} + +func setup() { + tp := NewMemoryCacheTransport() + client := http.Client{Transport: tp} + s.transport = tp + s.client = client + s.done = make(chan struct{}) + + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + + mux.HandleFunc("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Cache-Control", "max-age=3600") + })) + + mux.HandleFunc("/method", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Cache-Control", "max-age=3600") + w.Write([]byte(r.Method)) + })) + + mux.HandleFunc("/range", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + lm := "Fri, 14 Dec 2010 01:01:50 GMT" + if r.Header.Get("if-modified-since") == lm { + w.WriteHeader(http.StatusNotModified) + return + } + w.Header().Set("last-modified", lm) + if r.Header.Get("range") == "bytes=4-9" { + w.WriteHeader(http.StatusPartialContent) + w.Write([]byte(" text ")) + return + } + w.Write([]byte("Some text content")) + })) + + mux.HandleFunc("/nostore", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Cache-Control", "no-store") + })) + + mux.HandleFunc("/etag", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + etag := "124567" + if r.Header.Get("if-none-match") == etag { + w.WriteHeader(http.StatusNotModified) + return + } + w.Header().Set("etag", etag) + })) + + mux.HandleFunc("/lastmodified", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + lm := "Fri, 14 Dec 2010 01:01:50 GMT" + if r.Header.Get("if-modified-since") == lm { + w.WriteHeader(http.StatusNotModified) + return + } + w.Header().Set("last-modified", lm) + })) + + mux.HandleFunc("/varyaccept", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Cache-Control", "max-age=3600") + w.Header().Set("Content-Type", "text/plain") + w.Header().Set("Vary", "Accept") + w.Write([]byte("Some text content")) + })) + + mux.HandleFunc("/doublevary", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Cache-Control", "max-age=3600") + w.Header().Set("Content-Type", "text/plain") + w.Header().Set("Vary", "Accept, Accept-Language") + w.Write([]byte("Some text content")) + })) + 
mux.HandleFunc("/2varyheaders", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Cache-Control", "max-age=3600") + w.Header().Set("Content-Type", "text/plain") + w.Header().Add("Vary", "Accept") + w.Header().Add("Vary", "Accept-Language") + w.Write([]byte("Some text content")) + })) + mux.HandleFunc("/varyunused", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Cache-Control", "max-age=3600") + w.Header().Set("Content-Type", "text/plain") + w.Header().Set("Vary", "X-Madeup-Header") + w.Write([]byte("Some text content")) + })) + + updateFieldsCounter := 0 + mux.HandleFunc("/updatefields", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-Counter", strconv.Itoa(updateFieldsCounter)) + w.Header().Set("Etag", `"e"`) + updateFieldsCounter++ + if r.Header.Get("if-none-match") != "" { + w.WriteHeader(http.StatusNotModified) + return + } + w.Write([]byte("Some text content")) + })) + + // Take 3 seconds to return 200 OK (for testing client timeouts). + mux.HandleFunc("/3seconds", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(3 * time.Second) + })) + + mux.HandleFunc("/infinite", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for { + select { + case <-s.done: + return + default: + w.Write([]byte{0}) + } + } + })) +} + +func teardown() { + close(s.done) + s.server.Close() +} + +func resetTest() { + s.transport.Cache = NewMemoryCache() + clock = &realClock{} +} + +// TestCacheableMethod ensures that uncacheable method does not get stored +// in cache and get incorrectly used for a following cacheable method request. +func TestCacheableMethod(t *testing.T) { + resetTest() + { + req, err := http.NewRequest("POST", s.server.URL+"/method", nil) + if err != nil { + t.Fatal(err) + } + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + _, err = io.Copy(&buf, resp.Body) + if err != nil { + t.Fatal(err) + } + err = resp.Body.Close() + if err != nil { + t.Fatal(err) + } + if got, want := buf.String(), "POST"; got != want { + t.Errorf("got %q, want %q", got, want) + } + if resp.StatusCode != http.StatusOK { + t.Errorf("response status code isn't 200 OK: %v", resp.StatusCode) + } + } + { + req, err := http.NewRequest("GET", s.server.URL+"/method", nil) + if err != nil { + t.Fatal(err) + } + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + _, err = io.Copy(&buf, resp.Body) + if err != nil { + t.Fatal(err) + } + err = resp.Body.Close() + if err != nil { + t.Fatal(err) + } + if got, want := buf.String(), "GET"; got != want { + t.Errorf("got wrong body %q, want %q", got, want) + } + if resp.StatusCode != http.StatusOK { + t.Errorf("response status code isn't 200 OK: %v", resp.StatusCode) + } + if resp.Header.Get(XFromCache) != "" { + t.Errorf("XFromCache header isn't blank") + } + } +} + +func TestDontStorePartialRangeInCache(t *testing.T) { + resetTest() + { + req, err := http.NewRequest("GET", s.server.URL+"/range", nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("range", "bytes=4-9") + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + _, err = io.Copy(&buf, resp.Body) + if err != nil { + t.Fatal(err) + } + err = resp.Body.Close() + if err != nil { + t.Fatal(err) + } + if got, want := buf.String(), " text "; got != want { + t.Errorf("got %q, want %q", got, want) + } + if resp.StatusCode != http.StatusPartialContent { + 
t.Errorf("response status code isn't 206 Partial Content: %v", resp.StatusCode) + } + } + { + req, err := http.NewRequest("GET", s.server.URL+"/range", nil) + if err != nil { + t.Fatal(err) + } + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + _, err = io.Copy(&buf, resp.Body) + if err != nil { + t.Fatal(err) + } + err = resp.Body.Close() + if err != nil { + t.Fatal(err) + } + if got, want := buf.String(), "Some text content"; got != want { + t.Errorf("got %q, want %q", got, want) + } + if resp.StatusCode != http.StatusOK { + t.Errorf("response status code isn't 200 OK: %v", resp.StatusCode) + } + if resp.Header.Get(XFromCache) != "" { + t.Error("XFromCache header isn't blank") + } + } + { + req, err := http.NewRequest("GET", s.server.URL+"/range", nil) + if err != nil { + t.Fatal(err) + } + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + _, err = io.Copy(&buf, resp.Body) + if err != nil { + t.Fatal(err) + } + err = resp.Body.Close() + if err != nil { + t.Fatal(err) + } + if got, want := buf.String(), "Some text content"; got != want { + t.Errorf("got %q, want %q", got, want) + } + if resp.StatusCode != http.StatusOK { + t.Errorf("response status code isn't 200 OK: %v", resp.StatusCode) + } + if resp.Header.Get(XFromCache) != "1" { + t.Errorf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + } + { + req, err := http.NewRequest("GET", s.server.URL+"/range", nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("range", "bytes=4-9") + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + _, err = io.Copy(&buf, resp.Body) + if err != nil { + t.Fatal(err) + } + err = resp.Body.Close() + if err != nil { + t.Fatal(err) + } + if got, want := buf.String(), " text "; got != want { + t.Errorf("got %q, want %q", got, want) + } + if resp.StatusCode != http.StatusPartialContent { + t.Errorf("response status code isn't 206 Partial Content: %v", resp.StatusCode) + } + } +} + +func TestCacheOnlyIfBodyRead(t *testing.T) { + resetTest() + { + req, err := http.NewRequest("GET", s.server.URL, nil) + if err != nil { + t.Fatal(err) + } + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + // We do not read the body + resp.Body.Close() + } + { + req, err := http.NewRequest("GET", s.server.URL, nil) + if err != nil { + t.Fatal(err) + } + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatalf("XFromCache header isn't blank") + } + } +} + +func TestOnlyReadBodyOnDemand(t *testing.T) { + resetTest() + + req, err := http.NewRequest("GET", s.server.URL+"/infinite", nil) + if err != nil { + t.Fatal(err) + } + resp, err := s.client.Do(req) // This shouldn't hang forever. + if err != nil { + t.Fatal(err) + } + buf := make([]byte, 10) // Only partially read the body. 
+ _, err = resp.Body.Read(buf) + if err != nil { + t.Fatal(err) + } + resp.Body.Close() +} + +func TestGetOnlyIfCachedHit(t *testing.T) { + resetTest() + { + req, err := http.NewRequest("GET", s.server.URL, nil) + if err != nil { + t.Fatal(err) + } + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + _, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + } + { + req, err := http.NewRequest("GET", s.server.URL, nil) + if err != nil { + t.Fatal(err) + } + req.Header.Add("cache-control", "only-if-cached") + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "1" { + t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + if resp.StatusCode != http.StatusOK { + t.Fatalf("response status code isn't 200 OK: %v", resp.StatusCode) + } + } +} + +func TestGetOnlyIfCachedMiss(t *testing.T) { + resetTest() + req, err := http.NewRequest("GET", s.server.URL, nil) + if err != nil { + t.Fatal(err) + } + req.Header.Add("cache-control", "only-if-cached") + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + if resp.StatusCode != http.StatusGatewayTimeout { + t.Fatalf("response status code isn't 504 GatewayTimeout: %v", resp.StatusCode) + } +} + +func TestGetNoStoreRequest(t *testing.T) { + resetTest() + req, err := http.NewRequest("GET", s.server.URL, nil) + if err != nil { + t.Fatal(err) + } + req.Header.Add("Cache-Control", "no-store") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } +} + +func TestGetNoStoreResponse(t *testing.T) { + resetTest() + req, err := http.NewRequest("GET", s.server.URL+"/nostore", nil) + if err != nil { + t.Fatal(err) + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } +} + +func TestGetWithEtag(t *testing.T) { + resetTest() + req, err := http.NewRequest("GET", s.server.URL+"/etag", nil) + if err != nil { + t.Fatal(err) + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + _, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "1" { + t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + // additional assertions to verify that 304 response is converted properly + if resp.StatusCode != http.StatusOK { + t.Fatalf("response status code isn't 200 OK: %v", resp.StatusCode) + } + if _, ok := resp.Header["Connection"]; ok { + 
t.Fatalf("Connection header isn't absent") + } + } +} + +func TestGetWithLastModified(t *testing.T) { + resetTest() + req, err := http.NewRequest("GET", s.server.URL+"/lastmodified", nil) + if err != nil { + t.Fatal(err) + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + _, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "1" { + t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + } +} + +func TestGetWithVary(t *testing.T) { + resetTest() + req, err := http.NewRequest("GET", s.server.URL+"/varyaccept", nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("Accept", "text/plain") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get("Vary") != "Accept" { + t.Fatalf(`Vary header isn't "Accept": %v`, resp.Header.Get("Vary")) + } + _, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "1" { + t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + } + req.Header.Set("Accept", "text/html") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } + req.Header.Set("Accept", "") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } +} + +func TestGetWithDoubleVary(t *testing.T) { + resetTest() + req, err := http.NewRequest("GET", s.server.URL+"/doublevary", nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("Accept", "text/plain") + req.Header.Set("Accept-Language", "da, en-gb;q=0.8, en;q=0.7") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get("Vary") == "" { + t.Fatalf(`Vary header is blank`) + } + _, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "1" { + t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + } + req.Header.Set("Accept-Language", "") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } + req.Header.Set("Accept-Language", "da") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } +} + +func TestGetWith2VaryHeaders(t *testing.T) { + resetTest() + // Tests that multiple Vary headers' comma-separated lists are + // merged. See https://github.com/gregjones/httpcache/issues/27. 
+ const ( + accept = "text/plain" + acceptLanguage = "da, en-gb;q=0.8, en;q=0.7" + ) + req, err := http.NewRequest("GET", s.server.URL+"/2varyheaders", nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("Accept", accept) + req.Header.Set("Accept-Language", acceptLanguage) + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get("Vary") == "" { + t.Fatalf(`Vary header is blank`) + } + _, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "1" { + t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + } + req.Header.Set("Accept-Language", "") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } + req.Header.Set("Accept-Language", "da") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } + req.Header.Set("Accept-Language", acceptLanguage) + req.Header.Set("Accept", "") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + } + req.Header.Set("Accept", "image/png") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "" { + t.Fatal("XFromCache header isn't blank") + } + _, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "1" { + t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + } +} + +func TestGetVaryUnused(t *testing.T) { + resetTest() + req, err := http.NewRequest("GET", s.server.URL+"/varyunused", nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("Accept", "text/plain") + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get("Vary") == "" { + t.Fatalf(`Vary header is blank`) + } + _, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "1" { + t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + } +} + +func TestUpdateFields(t *testing.T) { + resetTest() + req, err := http.NewRequest("GET", s.server.URL+"/updatefields", nil) + if err != nil { + t.Fatal(err) + } + var counter, counter2 string + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + counter = resp.Header.Get("x-counter") + _, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + } + { + resp, err := s.client.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.Header.Get(XFromCache) != "1" { + t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache)) + } + counter2 = resp.Header.Get("x-counter") + } + if counter == counter2 { + t.Fatalf(`both "x-counter" values are equal: %v %v`, counter, counter2) + } +} + +func TestParseCacheControl(t *testing.T) { + 
resetTest() + h := http.Header{} + for range parseCacheControl(h) { + t.Fatal("cacheControl should be empty") + } + + h.Set("cache-control", "no-cache") + { + cc := parseCacheControl(h) + if _, ok := cc["foo"]; ok { + t.Error(`Value "foo" shouldn't exist`) + } + noCache, ok := cc["no-cache"] + if !ok { + t.Fatalf(`"no-cache" value isn't set`) + } + if noCache != "" { + t.Fatalf(`"no-cache" value isn't blank: %v`, noCache) + } + } + h.Set("cache-control", "no-cache, max-age=3600") + { + cc := parseCacheControl(h) + noCache, ok := cc["no-cache"] + if !ok { + t.Fatalf(`"no-cache" value isn't set`) + } + if noCache != "" { + t.Fatalf(`"no-cache" value isn't blank: %v`, noCache) + } + if cc["max-age"] != "3600" { + t.Fatalf(`"max-age" value isn't "3600": %v`, cc["max-age"]) + } + } +} + +func TestNoCacheRequestExpiration(t *testing.T) { + resetTest() + respHeaders := http.Header{} + respHeaders.Set("Cache-Control", "max-age=7200") + + reqHeaders := http.Header{} + reqHeaders.Set("Cache-Control", "no-cache") + if getFreshness(respHeaders, reqHeaders) != transparent { + t.Fatal("freshness isn't transparent") + } +} + +func TestNoCacheResponseExpiration(t *testing.T) { + resetTest() + respHeaders := http.Header{} + respHeaders.Set("Cache-Control", "no-cache") + respHeaders.Set("Expires", "Wed, 19 Apr 3000 11:43:00 GMT") + + reqHeaders := http.Header{} + if getFreshness(respHeaders, reqHeaders) != stale { + t.Fatal("freshness isn't stale") + } +} + +func TestReqMustRevalidate(t *testing.T) { + resetTest() + // not paying attention to request setting max-stale means never returning stale + // responses, so always acting as if must-revalidate is set + respHeaders := http.Header{} + + reqHeaders := http.Header{} + reqHeaders.Set("Cache-Control", "must-revalidate") + if getFreshness(respHeaders, reqHeaders) != stale { + t.Fatal("freshness isn't stale") + } +} + +func TestRespMustRevalidate(t *testing.T) { + resetTest() + respHeaders := http.Header{} + respHeaders.Set("Cache-Control", "must-revalidate") + + reqHeaders := http.Header{} + if getFreshness(respHeaders, reqHeaders) != stale { + t.Fatal("freshness isn't stale") + } +} + +func TestFreshExpiration(t *testing.T) { + resetTest() + now := time.Now() + respHeaders := http.Header{} + respHeaders.Set("date", now.Format(time.RFC1123)) + respHeaders.Set("expires", now.Add(time.Duration(2)*time.Second).Format(time.RFC1123)) + + reqHeaders := http.Header{} + if getFreshness(respHeaders, reqHeaders) != fresh { + t.Fatal("freshness isn't fresh") + } + + clock = &fakeClock{elapsed: 3 * time.Second} + if getFreshness(respHeaders, reqHeaders) != stale { + t.Fatal("freshness isn't stale") + } +} + +func TestMaxAge(t *testing.T) { + resetTest() + now := time.Now() + respHeaders := http.Header{} + respHeaders.Set("date", now.Format(time.RFC1123)) + respHeaders.Set("cache-control", "max-age=2") + + reqHeaders := http.Header{} + if getFreshness(respHeaders, reqHeaders) != fresh { + t.Fatal("freshness isn't fresh") + } + + clock = &fakeClock{elapsed: 3 * time.Second} + if getFreshness(respHeaders, reqHeaders) != stale { + t.Fatal("freshness isn't stale") + } +} + +func TestMaxAgeZero(t *testing.T) { + resetTest() + now := time.Now() + respHeaders := http.Header{} + respHeaders.Set("date", now.Format(time.RFC1123)) + respHeaders.Set("cache-control", "max-age=0") + + reqHeaders := http.Header{} + if getFreshness(respHeaders, reqHeaders) != stale { + t.Fatal("freshness isn't stale") + } +} + +func TestBothMaxAge(t *testing.T) { + resetTest() + now := time.Now() + 
respHeaders := http.Header{} + respHeaders.Set("date", now.Format(time.RFC1123)) + respHeaders.Set("cache-control", "max-age=2") + + reqHeaders := http.Header{} + reqHeaders.Set("cache-control", "max-age=0") + if getFreshness(respHeaders, reqHeaders) != stale { + t.Fatal("freshness isn't stale") + } +} + +func TestMinFreshWithExpires(t *testing.T) { + resetTest() + now := time.Now() + respHeaders := http.Header{} + respHeaders.Set("date", now.Format(time.RFC1123)) + respHeaders.Set("expires", now.Add(time.Duration(2)*time.Second).Format(time.RFC1123)) + + reqHeaders := http.Header{} + reqHeaders.Set("cache-control", "min-fresh=1") + if getFreshness(respHeaders, reqHeaders) != fresh { + t.Fatal("freshness isn't fresh") + } + + reqHeaders = http.Header{} + reqHeaders.Set("cache-control", "min-fresh=2") + if getFreshness(respHeaders, reqHeaders) != stale { + t.Fatal("freshness isn't stale") + } +} + +func TestEmptyMaxStale(t *testing.T) { + resetTest() + now := time.Now() + respHeaders := http.Header{} + respHeaders.Set("date", now.Format(time.RFC1123)) + respHeaders.Set("cache-control", "max-age=20") + + reqHeaders := http.Header{} + reqHeaders.Set("cache-control", "max-stale") + clock = &fakeClock{elapsed: 10 * time.Second} + if getFreshness(respHeaders, reqHeaders) != fresh { + t.Fatal("freshness isn't fresh") + } + + clock = &fakeClock{elapsed: 60 * time.Second} + if getFreshness(respHeaders, reqHeaders) != fresh { + t.Fatal("freshness isn't fresh") + } +} + +func TestMaxStaleValue(t *testing.T) { + resetTest() + now := time.Now() + respHeaders := http.Header{} + respHeaders.Set("date", now.Format(time.RFC1123)) + respHeaders.Set("cache-control", "max-age=10") + + reqHeaders := http.Header{} + reqHeaders.Set("cache-control", "max-stale=20") + clock = &fakeClock{elapsed: 5 * time.Second} + if getFreshness(respHeaders, reqHeaders) != fresh { + t.Fatal("freshness isn't fresh") + } + + clock = &fakeClock{elapsed: 15 * time.Second} + if getFreshness(respHeaders, reqHeaders) != fresh { + t.Fatal("freshness isn't fresh") + } + + clock = &fakeClock{elapsed: 30 * time.Second} + if getFreshness(respHeaders, reqHeaders) != stale { + t.Fatal("freshness isn't stale") + } +} + +func containsHeader(headers []string, header string) bool { + for _, v := range headers { + if http.CanonicalHeaderKey(v) == http.CanonicalHeaderKey(header) { + return true + } + } + return false +} + +func TestGetEndToEndHeaders(t *testing.T) { + resetTest() + var ( + headers http.Header + end2end []string + ) + + headers = http.Header{} + headers.Set("content-type", "text/html") + headers.Set("te", "deflate") + + end2end = getEndToEndHeaders(headers) + if !containsHeader(end2end, "content-type") { + t.Fatal(`doesn't contain "content-type" header`) + } + if containsHeader(end2end, "te") { + t.Fatal(`doesn't contain "te" header`) + } + + headers = http.Header{} + headers.Set("connection", "content-type") + headers.Set("content-type", "text/csv") + headers.Set("te", "deflate") + end2end = getEndToEndHeaders(headers) + if containsHeader(end2end, "connection") { + t.Fatal(`doesn't contain "connection" header`) + } + if containsHeader(end2end, "content-type") { + t.Fatal(`doesn't contain "content-type" header`) + } + if containsHeader(end2end, "te") { + t.Fatal(`doesn't contain "te" header`) + } + + headers = http.Header{} + end2end = getEndToEndHeaders(headers) + if len(end2end) != 0 { + t.Fatal(`non-zero end2end headers`) + } + + headers = http.Header{} + headers.Set("connection", "content-type") + end2end = 
getEndToEndHeaders(headers) + if len(end2end) != 0 { + t.Fatal(`non-zero end2end headers`) + } +} + +type transportMock struct { + response *http.Response + err error +} + +func (t transportMock) RoundTrip(req *http.Request) (resp *http.Response, err error) { + return t.response, t.err +} + +func TestStaleIfErrorRequest(t *testing.T) { + resetTest() + now := time.Now() + tmock := transportMock{ + response: &http.Response{ + Status: http.StatusText(http.StatusOK), + StatusCode: http.StatusOK, + Header: http.Header{ + "Date": []string{now.Format(time.RFC1123)}, + "Cache-Control": []string{"no-cache"}, + }, + Body: ioutil.NopCloser(bytes.NewBuffer([]byte("some data"))), + }, + err: nil, + } + tp := NewMemoryCacheTransport() + tp.Transport = &tmock + + // First time, response is cached on success + r, _ := http.NewRequest("GET", "http://somewhere.com/", nil) + r.Header.Set("Cache-Control", "stale-if-error") + resp, err := tp.RoundTrip(r) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp is nil") + } + _, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + + // On failure, response is returned from the cache + tmock.response = nil + tmock.err = errors.New("some error") + resp, err = tp.RoundTrip(r) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp is nil") + } +} + +func TestStaleIfErrorRequestLifetime(t *testing.T) { + resetTest() + now := time.Now() + tmock := transportMock{ + response: &http.Response{ + Status: http.StatusText(http.StatusOK), + StatusCode: http.StatusOK, + Header: http.Header{ + "Date": []string{now.Format(time.RFC1123)}, + "Cache-Control": []string{"no-cache"}, + }, + Body: ioutil.NopCloser(bytes.NewBuffer([]byte("some data"))), + }, + err: nil, + } + tp := NewMemoryCacheTransport() + tp.Transport = &tmock + + // First time, response is cached on success + r, _ := http.NewRequest("GET", "http://somewhere.com/", nil) + r.Header.Set("Cache-Control", "stale-if-error=100") + resp, err := tp.RoundTrip(r) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp is nil") + } + _, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + + // On failure, response is returned from the cache + tmock.response = nil + tmock.err = errors.New("some error") + resp, err = tp.RoundTrip(r) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp is nil") + } + + // Same for http errors + tmock.response = &http.Response{StatusCode: http.StatusInternalServerError} + tmock.err = nil + resp, err = tp.RoundTrip(r) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp is nil") + } + + // If failure last more than max stale, error is returned + clock = &fakeClock{elapsed: 200 * time.Second} + _, err = tp.RoundTrip(r) + if err != tmock.err { + t.Fatalf("got err %v, want %v", err, tmock.err) + } +} + +func TestStaleIfErrorResponse(t *testing.T) { + resetTest() + now := time.Now() + tmock := transportMock{ + response: &http.Response{ + Status: http.StatusText(http.StatusOK), + StatusCode: http.StatusOK, + Header: http.Header{ + "Date": []string{now.Format(time.RFC1123)}, + "Cache-Control": []string{"no-cache, stale-if-error"}, + }, + Body: ioutil.NopCloser(bytes.NewBuffer([]byte("some data"))), + }, + err: nil, + } + tp := NewMemoryCacheTransport() + tp.Transport = &tmock + + // First time, response is cached on success + r, _ := http.NewRequest("GET", "http://somewhere.com/", nil) + resp, err := tp.RoundTrip(r) + if err != nil { + t.Fatal(err) + } + if resp == nil { + 
t.Fatal("resp is nil") + } + _, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + + // On failure, response is returned from the cache + tmock.response = nil + tmock.err = errors.New("some error") + resp, err = tp.RoundTrip(r) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp is nil") + } +} + +func TestStaleIfErrorResponseLifetime(t *testing.T) { + resetTest() + now := time.Now() + tmock := transportMock{ + response: &http.Response{ + Status: http.StatusText(http.StatusOK), + StatusCode: http.StatusOK, + Header: http.Header{ + "Date": []string{now.Format(time.RFC1123)}, + "Cache-Control": []string{"no-cache, stale-if-error=100"}, + }, + Body: ioutil.NopCloser(bytes.NewBuffer([]byte("some data"))), + }, + err: nil, + } + tp := NewMemoryCacheTransport() + tp.Transport = &tmock + + // First time, response is cached on success + r, _ := http.NewRequest("GET", "http://somewhere.com/", nil) + resp, err := tp.RoundTrip(r) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp is nil") + } + _, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + + // On failure, response is returned from the cache + tmock.response = nil + tmock.err = errors.New("some error") + resp, err = tp.RoundTrip(r) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp is nil") + } + + // If failure last more than max stale, error is returned + clock = &fakeClock{elapsed: 200 * time.Second} + _, err = tp.RoundTrip(r) + if err != tmock.err { + t.Fatalf("got err %v, want %v", err, tmock.err) + } +} + +// Test that http.Client.Timeout is respected when cache transport is used. +// That is so as long as request cancellation is propagated correctly. +// In the past, that required CancelRequest to be implemented correctly, +// but modern http.Client uses Request.Cancel (or request context) instead, +// so we don't have to do anything. +func TestClientTimeout(t *testing.T) { + if testing.Short() { + t.Skip("skipping timeout test in short mode") // Because it takes at least 3 seconds to run. + } + resetTest() + client := &http.Client{ + Transport: NewMemoryCacheTransport(), + Timeout: time.Second, + } + started := time.Now() + resp, err := client.Get(s.server.URL + "/3seconds") + taken := time.Since(started) + if err == nil { + t.Error("got nil error, want timeout error") + } + if resp != nil { + t.Error("got non-nil resp, want nil resp") + } + if taken >= 2*time.Second { + t.Error("client.Do took 2+ seconds, want < 2 seconds") + } +} diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml new file mode 100644 index 00000000000..955dc0be5fa --- /dev/null +++ b/vendor/github.com/json-iterator/go/.codecov.yml @@ -0,0 +1,3 @@ +ignore: + - "output_tests/.*" + diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore new file mode 100644 index 00000000000..501fcdc9a69 --- /dev/null +++ b/vendor/github.com/json-iterator/go/.gitignore @@ -0,0 +1,3 @@ +.idea +/coverage.txt +/profile.out diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml new file mode 100644 index 00000000000..945b9c5947c --- /dev/null +++ b/vendor/github.com/json-iterator/go/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.8.x + +before_install: + - go get -t -v ./... 
+ +script: + - ./test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/exponent-io/jsonpath/LICENSE b/vendor/github.com/json-iterator/go/LICENSE similarity index 94% rename from vendor/github.com/exponent-io/jsonpath/LICENSE rename to vendor/github.com/json-iterator/go/LICENSE index 54197725078..2cf4f5ab28e 100644 --- a/vendor/github.com/exponent-io/jsonpath/LICENSE +++ b/vendor/github.com/json-iterator/go/LICENSE @@ -1,6 +1,6 @@ -The MIT License (MIT) +MIT License -Copyright (c) 2015 Exponent Labs LLC +Copyright (c) 2016 json-iterator Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md new file mode 100644 index 00000000000..23a4b57c8e7 --- /dev/null +++ b/vendor/github.com/json-iterator/go/README.md @@ -0,0 +1,80 @@ +[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go) +[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go) +[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go) +[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go) +[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE) +[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) + +A high-performance 100% compatible drop-in replacement of "encoding/json" + +``` +Go开发者们请加入我们,滴滴出行平台技术部 taowen@didichuxing.com +``` + +# Benchmark + +![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) + +Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go + +Raw Result (easyjson requires static code generation) + +| | ns/op | allocation bytes | allocation times | +| --- | --- | --- | --- | +| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | +| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | +| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | +| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | +| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | +| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | + +# Usage + +100% compatibility with standard lib + +Replace + +```go +import "encoding/json" +json.Marshal(&data) +``` + +with + +```go +import "github.com/json-iterator/go" +jsoniter.Marshal(&data) +``` + +Replace + +```go +import "encoding/json" +json.Unmarshal(input, &data) +``` + +with + +```go +import "github.com/json-iterator/go" +jsoniter.Unmarshal(input, &data) +``` + +[More documentation](http://jsoniter.com/migrate-from-go-std.html) + +# How to get + +``` +go get github.com/json-iterator/go +``` + +# Contribution Welcomed ! 
+ +Contributors + +* [thockin](https://github.com/thockin) +* [mattn](https://github.com/mattn) +* [cch123](https://github.com/cch123) + +Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/vendor/github.com/json-iterator/go/example_test.go b/vendor/github.com/json-iterator/go/example_test.go new file mode 100644 index 00000000000..1c8f341c146 --- /dev/null +++ b/vendor/github.com/json-iterator/go/example_test.go @@ -0,0 +1,95 @@ +package jsoniter + +import ( + "fmt" + "os" +) + +func ExampleMarshal() { + type ColorGroup struct { + ID int + Name string + Colors []string + } + group := ColorGroup{ + ID: 1, + Name: "Reds", + Colors: []string{"Crimson", "Red", "Ruby", "Maroon"}, + } + b, err := Marshal(group) + if err != nil { + fmt.Println("error:", err) + } + os.Stdout.Write(b) + // Output: + // {"ID":1,"Name":"Reds","Colors":["Crimson","Red","Ruby","Maroon"]} +} + +func ExampleUnmarshal() { + var jsonBlob = []byte(`[ + {"Name": "Platypus", "Order": "Monotremata"}, + {"Name": "Quoll", "Order": "Dasyuromorphia"} + ]`) + type Animal struct { + Name string + Order string + } + var animals []Animal + err := Unmarshal(jsonBlob, &animals) + if err != nil { + fmt.Println("error:", err) + } + fmt.Printf("%+v", animals) + // Output: + // [{Name:Platypus Order:Monotremata} {Name:Quoll Order:Dasyuromorphia}] +} + +func ExampleConfigFastest_Marshal() { + type ColorGroup struct { + ID int + Name string + Colors []string + } + group := ColorGroup{ + ID: 1, + Name: "Reds", + Colors: []string{"Crimson", "Red", "Ruby", "Maroon"}, + } + stream := ConfigFastest.BorrowStream(nil) + defer ConfigFastest.ReturnStream(stream) + stream.WriteVal(group) + if stream.Error != nil { + fmt.Println("error:", stream.Error) + } + os.Stdout.Write(stream.Buffer()) + // Output: + // {"ID":1,"Name":"Reds","Colors":["Crimson","Red","Ruby","Maroon"]} +} + +func ExampleConfigFastest_Unmarshal() { + var jsonBlob = []byte(`[ + {"Name": "Platypus", "Order": "Monotremata"}, + {"Name": "Quoll", "Order": "Dasyuromorphia"} + ]`) + type Animal struct { + Name string + Order string + } + var animals []Animal + iter := ConfigFastest.BorrowIterator(jsonBlob) + defer ConfigFastest.ReturnIterator(iter) + iter.ReadVal(&animals) + if iter.Error != nil { + fmt.Println("error:", iter.Error) + } + fmt.Printf("%+v", animals) + // Output: + // [{Name:Platypus Order:Monotremata} {Name:Quoll Order:Dasyuromorphia}] +} + +func ExampleGet() { + val := []byte(`{"ID":1,"Name":"Reds","Colors":["Crimson","Red","Ruby","Maroon"]}`) + fmt.Printf(Get(val, "Colors", 0).ToString()) + // Output: + // Crimson +} diff --git a/vendor/github.com/json-iterator/go/feature_adapter.go b/vendor/github.com/json-iterator/go/feature_adapter.go new file mode 100644 index 00000000000..edb477c4fd1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_adapter.go @@ -0,0 +1,127 @@ +package jsoniter + +import ( + "bytes" + "io" +) + +// RawMessage to make replace json with jsoniter +type RawMessage []byte + +// Unmarshal adapts to json/encoding Unmarshal API +// +// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. 
+// Refer to https://godoc.org/encoding/json#Unmarshal for more information +func Unmarshal(data []byte, v interface{}) error { + return ConfigDefault.Unmarshal(data, v) +} + +func lastNotSpacePos(data []byte) int { + for i := len(data) - 1; i >= 0; i-- { + if data[i] != ' ' && data[i] != '\t' && data[i] != '\r' && data[i] != '\n' { + return i + 1 + } + } + return 0 +} + +// UnmarshalFromString convenient method to read from string instead of []byte +func UnmarshalFromString(str string, v interface{}) error { + return ConfigDefault.UnmarshalFromString(str, v) +} + +// Get quick method to get value from deeply nested JSON structure +func Get(data []byte, path ...interface{}) Any { + return ConfigDefault.Get(data, path...) +} + +// Marshal adapts to json/encoding Marshal API +// +// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API +// Refer to https://godoc.org/encoding/json#Marshal for more information +func Marshal(v interface{}) ([]byte, error) { + return ConfigDefault.Marshal(v) +} + +// MarshalIndent same as json.MarshalIndent. Prefix is not supported. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return ConfigDefault.MarshalIndent(v, prefix, indent) +} + +// MarshalToString convenient method to write as string instead of []byte +func MarshalToString(v interface{}) (string, error) { + return ConfigDefault.MarshalToString(v) +} + +// NewDecoder adapts to json/stream NewDecoder API. +// +// NewDecoder returns a new decoder that reads from r. +// +// Instead of a json/encoding Decoder, an Decoder is returned +// Refer to https://godoc.org/encoding/json#NewDecoder for more information +func NewDecoder(reader io.Reader) *Decoder { + return ConfigDefault.NewDecoder(reader) +} + +// Decoder reads and decodes JSON values from an input stream. +// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) +type Decoder struct { + iter *Iterator +} + +// Decode decode JSON into interface{} +func (adapter *Decoder) Decode(obj interface{}) error { + adapter.iter.ReadVal(obj) + err := adapter.iter.Error + if err == io.EOF { + return nil + } + return adapter.iter.Error +} + +// More is there more? +func (adapter *Decoder) More() bool { + return adapter.iter.head != adapter.iter.tail +} + +// Buffered remaining buffer +func (adapter *Decoder) Buffered() io.Reader { + remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] + return bytes.NewReader(remaining) +} + +// UseNumber for number JSON element, use float64 or json.NumberValue (alias of string) +func (adapter *Decoder) UseNumber() { + origCfg := adapter.iter.cfg.configBeforeFrozen + origCfg.UseNumber = true + adapter.iter.cfg = origCfg.Froze().(*frozenConfig) +} + +// NewEncoder same as json.NewEncoder +func NewEncoder(writer io.Writer) *Encoder { + return ConfigDefault.NewEncoder(writer) +} + +// Encoder same as json.Encoder +type Encoder struct { + stream *Stream +} + +// Encode encode interface{} as JSON to io.Writer +func (adapter *Encoder) Encode(val interface{}) error { + adapter.stream.WriteVal(val) + adapter.stream.Flush() + return adapter.stream.Error +} + +// SetIndent set the indention. 
Prefix is not supported +func (adapter *Encoder) SetIndent(prefix, indent string) { + adapter.stream.cfg.indentionStep = len(indent) +} + +// SetEscapeHTML escape html by default, set to false to disable +func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { + config := adapter.stream.cfg.configBeforeFrozen + config.EscapeHTML = escapeHTML + adapter.stream.cfg = config.Froze().(*frozenConfig) +} diff --git a/vendor/github.com/json-iterator/go/feature_any.go b/vendor/github.com/json-iterator/go/feature_any.go new file mode 100644 index 00000000000..6733dce4cc5 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any.go @@ -0,0 +1,242 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" +) + +// Any generic object representation. +// The lazy json implementation holds []byte and parse lazily. +type Any interface { + LastError() error + ValueType() ValueType + MustBeValid() Any + ToBool() bool + ToInt() int + ToInt32() int32 + ToInt64() int64 + ToUint() uint + ToUint32() uint32 + ToUint64() uint64 + ToFloat32() float32 + ToFloat64() float64 + ToString() string + ToVal(val interface{}) + Get(path ...interface{}) Any + // TODO: add Set + Size() int + Keys() []string + GetInterface() interface{} + WriteTo(stream *Stream) +} + +type baseAny struct{} + +func (any *baseAny) Get(path ...interface{}) Any { + return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} +} + +func (any *baseAny) Size() int { + return 0 +} + +func (any *baseAny) Keys() []string { + return []string{} +} + +func (any *baseAny) ToVal(obj interface{}) { + panic("not implemented") +} + +// WrapInt32 turn int32 into Any interface +func WrapInt32(val int32) Any { + return &int32Any{baseAny{}, val} +} + +// WrapInt64 turn int64 into Any interface +func WrapInt64(val int64) Any { + return &int64Any{baseAny{}, val} +} + +// WrapUint32 turn uint32 into Any interface +func WrapUint32(val uint32) Any { + return &uint32Any{baseAny{}, val} +} + +// WrapUint64 turn uint64 into Any interface +func WrapUint64(val uint64) Any { + return &uint64Any{baseAny{}, val} +} + +// WrapFloat64 turn float64 into Any interface +func WrapFloat64(val float64) Any { + return &floatAny{baseAny{}, val} +} + +// WrapString turn string into Any interface +func WrapString(val string) Any { + return &stringAny{baseAny{}, val} +} + +// Wrap turn a go object into Any interface +func Wrap(val interface{}) Any { + if val == nil { + return &nilAny{} + } + asAny, isAny := val.(Any) + if isAny { + return asAny + } + typ := reflect.TypeOf(val) + switch typ.Kind() { + case reflect.Slice: + return wrapArray(val) + case reflect.Struct: + return wrapStruct(val) + case reflect.Map: + return wrapMap(val) + case reflect.String: + return WrapString(val.(string)) + case reflect.Int: + return WrapInt64(int64(val.(int))) + case reflect.Int8: + return WrapInt32(int32(val.(int8))) + case reflect.Int16: + return WrapInt32(int32(val.(int16))) + case reflect.Int32: + return WrapInt32(val.(int32)) + case reflect.Int64: + return WrapInt64(val.(int64)) + case reflect.Uint: + return WrapUint64(uint64(val.(uint))) + case reflect.Uint8: + return WrapUint32(uint32(val.(uint8))) + case reflect.Uint16: + return WrapUint32(uint32(val.(uint16))) + case reflect.Uint32: + return WrapUint32(uint32(val.(uint32))) + case reflect.Uint64: + return WrapUint64(val.(uint64)) + case reflect.Float32: + return WrapFloat64(float64(val.(float32))) + case reflect.Float64: + return WrapFloat64(val.(float64)) + case reflect.Bool: + if val.(bool) == true { + return &trueAny{} + 
} + return &falseAny{} + } + return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} +} + +// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. +func (iter *Iterator) ReadAny() Any { + return iter.readAny() +} + +func (iter *Iterator) readAny() Any { + c := iter.nextToken() + switch c { + case '"': + iter.unreadByte() + return &stringAny{baseAny{}, iter.ReadString()} + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + return &nilAny{} + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + return &trueAny{} + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + return &falseAny{} + case '{': + return iter.readObjectAny() + case '[': + return iter.readArrayAny() + case '-': + return iter.readNumberAny(false) + default: + return iter.readNumberAny(true) + } +} + +func (iter *Iterator) readNumberAny(positive bool) Any { + iter.startCapture(iter.head - 1) + iter.skipNumber() + lazyBuf := iter.stopCapture() + return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readObjectAny() Any { + iter.startCapture(iter.head - 1) + iter.skipObject() + lazyBuf := iter.stopCapture() + return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readArrayAny() Any { + iter.startCapture(iter.head - 1) + iter.skipArray() + lazyBuf := iter.stopCapture() + return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func locateObjectField(iter *Iterator, target string) []byte { + var found []byte + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + if field == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + return true + }) + return found +} + +func locateArrayElement(iter *Iterator, target int) []byte { + var found []byte + n := 0 + iter.ReadArrayCB(func(iter *Iterator) bool { + if n == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + n++ + return true + }) + return found +} + +func locatePath(iter *Iterator, path []interface{}) Any { + for i, pathKeyObj := range path { + switch pathKey := pathKeyObj.(type) { + case string: + valueBytes := locateObjectField(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int: + valueBytes := locateArrayElement(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int32: + if '*' == pathKey { + return iter.readAny().Get(path[i:]...) 
+ } + return newInvalidAny(path[i:]) + default: + return newInvalidAny(path[i:]) + } + } + if iter.Error != nil && iter.Error != io.EOF { + return &invalidAny{baseAny{}, iter.Error} + } + return iter.readAny() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_array.go b/vendor/github.com/json-iterator/go/feature_any_array.go new file mode 100644 index 00000000000..0449e9aa428 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_array.go @@ -0,0 +1,278 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type arrayLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *arrayLazyAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayLazyAny) MustBeValid() Any { + return any +} + +func (any *arrayLazyAny) LastError() error { + return any.err +} + +func (any *arrayLazyAny) ToBool() bool { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.ReadArray() +} + +func (any *arrayLazyAny) ToInt() int { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt32() int32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt64() int64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint() uint { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint32() uint32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint64() uint64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat32() float32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat64() float64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *arrayLazyAny) ToVal(val interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(val) +} + +func (any *arrayLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateArrayElement(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + arr := make([]Any, 0) + iter.ReadArrayCB(func(iter *Iterator) bool { + found := iter.readAny().Get(path[1:]...) 
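+				// The '*' wildcard applies the remaining path to every array element
+				// and keeps only those elements for which it resolves to a valid value.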
+ if found.ValueType() != InvalidValue { + arr = append(arr, found) + } + return true + }) + return wrapArray(arr) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadArrayCB(func(iter *Iterator) bool { + size++ + iter.Skip() + return true + }) + return size +} + +func (any *arrayLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *arrayLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type arrayAny struct { + baseAny + val reflect.Value +} + +func wrapArray(val interface{}) *arrayAny { + return &arrayAny{baseAny{}, reflect.ValueOf(val)} +} + +func (any *arrayAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayAny) MustBeValid() Any { + return any +} + +func (any *arrayAny) LastError() error { + return nil +} + +func (any *arrayAny) ToBool() bool { + return any.val.Len() != 0 +} + +func (any *arrayAny) ToInt() int { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt32() int32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt64() int64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint() uint { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint32() uint32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint64() uint64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat32() float32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat64() float64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToString() string { + str, _ := MarshalToString(any.val.Interface()) + return str +} + +func (any *arrayAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + if firstPath < 0 || firstPath >= any.val.Len() { + return newInvalidAny(path) + } + return Wrap(any.val.Index(firstPath).Interface()) + case int32: + if '*' == firstPath { + mappedAll := make([]Any, 0) + for i := 0; i < any.val.Len(); i++ { + mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll = append(mappedAll, mapped) + } + } + return wrapArray(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayAny) Size() int { + return any.val.Len() +} + +func (any *arrayAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *arrayAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_bool.go b/vendor/github.com/json-iterator/go/feature_any_bool.go new file mode 100644 index 00000000000..9452324af5b --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_bool.go @@ -0,0 +1,137 @@ +package jsoniter + +type trueAny struct { + baseAny +} + +func (any *trueAny) LastError() error { + return nil +} + +func (any *trueAny) ToBool() bool { + return true +} + +func (any *trueAny) ToInt() int { + return 1 +} + +func (any *trueAny) ToInt32() int32 { + return 1 +} + +func (any *trueAny) ToInt64() int64 { + return 1 +} + +func (any *trueAny) ToUint() uint { + return 1 +} + +func (any *trueAny) ToUint32() uint32 { + return 1 +} + +func (any *trueAny) ToUint64() uint64 { + return 1 +} + +func (any *trueAny) ToFloat32() float32 { + return 1 +} + +func (any *trueAny) ToFloat64() float64 { + return 1 +} + +func (any *trueAny) ToString() string { + return "true" +} + +func (any *trueAny) WriteTo(stream *Stream) { + stream.WriteTrue() +} + +func (any *trueAny) Parse() *Iterator { + return nil +} + +func (any *trueAny) GetInterface() interface{} { + return true +} + +func (any *trueAny) ValueType() ValueType { + return BoolValue +} + +func (any *trueAny) MustBeValid() Any { + return any +} + +type falseAny struct { + baseAny +} + +func (any *falseAny) LastError() error { + return nil +} + +func (any *falseAny) ToBool() bool { + return false +} + +func (any *falseAny) ToInt() int { + return 0 +} + +func (any *falseAny) ToInt32() int32 { + return 0 +} + +func (any *falseAny) ToInt64() int64 { + return 0 +} + +func (any *falseAny) ToUint() uint { + return 0 +} + +func (any *falseAny) ToUint32() uint32 { + return 0 +} + +func (any *falseAny) ToUint64() uint64 { + return 0 +} + +func (any *falseAny) ToFloat32() float32 { + return 0 +} + +func (any *falseAny) ToFloat64() float64 { + return 0 +} + +func (any *falseAny) ToString() string { + return "false" +} + +func (any *falseAny) WriteTo(stream *Stream) { + stream.WriteFalse() +} + +func (any *falseAny) Parse() *Iterator { + return nil +} + +func (any *falseAny) GetInterface() interface{} { + return false +} + +func (any *falseAny) ValueType() ValueType { + return BoolValue +} + +func (any *falseAny) MustBeValid() Any { + return any +} diff --git a/vendor/github.com/json-iterator/go/feature_any_float.go b/vendor/github.com/json-iterator/go/feature_any_float.go new file mode 100644 index 00000000000..35fdb09497f --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_float.go @@ -0,0 +1,83 @@ +package jsoniter + +import ( + "strconv" +) + +type floatAny struct { + baseAny + val float64 +} + +func (any *floatAny) Parse() *Iterator { + return nil +} + +func (any *floatAny) ValueType() ValueType { + return NumberValue +} + +func (any *floatAny) MustBeValid() Any { + return any +} + +func (any *floatAny) LastError() error { + return nil +} + +func (any *floatAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *floatAny) ToInt() int { + return int(any.val) +} + +func (any *floatAny) ToInt32() int32 { + return int32(any.val) +} + +func 
(any *floatAny) ToInt64() int64 { + return int64(any.val) +} + +func (any *floatAny) ToUint() uint { + if any.val > 0 { + return uint(any.val) + } + return 0 +} + +func (any *floatAny) ToUint32() uint32 { + if any.val > 0 { + return uint32(any.val) + } + return 0 +} + +func (any *floatAny) ToUint64() uint64 { + if any.val > 0 { + return uint64(any.val) + } + return 0 +} + +func (any *floatAny) ToFloat32() float32 { + return float32(any.val) +} + +func (any *floatAny) ToFloat64() float64 { + return any.val +} + +func (any *floatAny) ToString() string { + return strconv.FormatFloat(any.val, 'E', -1, 64) +} + +func (any *floatAny) WriteTo(stream *Stream) { + stream.WriteFloat64(any.val) +} + +func (any *floatAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_int32.go b/vendor/github.com/json-iterator/go/feature_any_int32.go new file mode 100644 index 00000000000..1b56f399150 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_int32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int32Any struct { + baseAny + val int32 +} + +func (any *int32Any) LastError() error { + return nil +} + +func (any *int32Any) ValueType() ValueType { + return NumberValue +} + +func (any *int32Any) MustBeValid() Any { + return any +} + +func (any *int32Any) ToBool() bool { + return any.val != 0 +} + +func (any *int32Any) ToInt() int { + return int(any.val) +} + +func (any *int32Any) ToInt32() int32 { + return any.val +} + +func (any *int32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *int32Any) ToUint() uint { + return uint(any.val) +} + +func (any *int32Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *int32Any) WriteTo(stream *Stream) { + stream.WriteInt32(any.val) +} + +func (any *int32Any) Parse() *Iterator { + return nil +} + +func (any *int32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_int64.go b/vendor/github.com/json-iterator/go/feature_any_int64.go new file mode 100644 index 00000000000..c440d72b6d3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_int64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int64Any struct { + baseAny + val int64 +} + +func (any *int64Any) LastError() error { + return nil +} + +func (any *int64Any) ValueType() ValueType { + return NumberValue +} + +func (any *int64Any) MustBeValid() Any { + return any +} + +func (any *int64Any) ToBool() bool { + return any.val != 0 +} + +func (any *int64Any) ToInt() int { + return int(any.val) +} + +func (any *int64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *int64Any) ToInt64() int64 { + return any.val +} + +func (any *int64Any) ToUint() uint { + return uint(any.val) +} + +func (any *int64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int64Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int64Any) ToString() string { + return strconv.FormatInt(any.val, 10) +} + +func (any *int64Any) 
WriteTo(stream *Stream) { + stream.WriteInt64(any.val) +} + +func (any *int64Any) Parse() *Iterator { + return nil +} + +func (any *int64Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_invalid.go b/vendor/github.com/json-iterator/go/feature_any_invalid.go new file mode 100644 index 00000000000..1d859eac327 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_invalid.go @@ -0,0 +1,82 @@ +package jsoniter + +import "fmt" + +type invalidAny struct { + baseAny + err error +} + +func newInvalidAny(path []interface{}) *invalidAny { + return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} +} + +func (any *invalidAny) LastError() error { + return any.err +} + +func (any *invalidAny) ValueType() ValueType { + return InvalidValue +} + +func (any *invalidAny) MustBeValid() Any { + panic(any.err) +} + +func (any *invalidAny) ToBool() bool { + return false +} + +func (any *invalidAny) ToInt() int { + return 0 +} + +func (any *invalidAny) ToInt32() int32 { + return 0 +} + +func (any *invalidAny) ToInt64() int64 { + return 0 +} + +func (any *invalidAny) ToUint() uint { + return 0 +} + +func (any *invalidAny) ToUint32() uint32 { + return 0 +} + +func (any *invalidAny) ToUint64() uint64 { + return 0 +} + +func (any *invalidAny) ToFloat32() float32 { + return 0 +} + +func (any *invalidAny) ToFloat64() float64 { + return 0 +} + +func (any *invalidAny) ToString() string { + return "" +} + +func (any *invalidAny) WriteTo(stream *Stream) { +} + +func (any *invalidAny) Get(path ...interface{}) Any { + if any.err == nil { + return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} + } + return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} +} + +func (any *invalidAny) Parse() *Iterator { + return nil +} + +func (any *invalidAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/feature_any_nil.go b/vendor/github.com/json-iterator/go/feature_any_nil.go new file mode 100644 index 00000000000..d04cb54c11c --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_nil.go @@ -0,0 +1,69 @@ +package jsoniter + +type nilAny struct { + baseAny +} + +func (any *nilAny) LastError() error { + return nil +} + +func (any *nilAny) ValueType() ValueType { + return NilValue +} + +func (any *nilAny) MustBeValid() Any { + return any +} + +func (any *nilAny) ToBool() bool { + return false +} + +func (any *nilAny) ToInt() int { + return 0 +} + +func (any *nilAny) ToInt32() int32 { + return 0 +} + +func (any *nilAny) ToInt64() int64 { + return 0 +} + +func (any *nilAny) ToUint() uint { + return 0 +} + +func (any *nilAny) ToUint32() uint32 { + return 0 +} + +func (any *nilAny) ToUint64() uint64 { + return 0 +} + +func (any *nilAny) ToFloat32() float32 { + return 0 +} + +func (any *nilAny) ToFloat64() float64 { + return 0 +} + +func (any *nilAny) ToString() string { + return "" +} + +func (any *nilAny) WriteTo(stream *Stream) { + stream.WriteNil() +} + +func (any *nilAny) Parse() *Iterator { + return nil +} + +func (any *nilAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/feature_any_number.go b/vendor/github.com/json-iterator/go/feature_any_number.go new file mode 100644 index 00000000000..4e1c27641d2 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_number.go @@ -0,0 +1,104 @@ +package jsoniter + +import "unsafe" + +type numberLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + 
err error +} + +func (any *numberLazyAny) ValueType() ValueType { + return NumberValue +} + +func (any *numberLazyAny) MustBeValid() Any { + return any +} + +func (any *numberLazyAny) LastError() error { + return any.err +} + +func (any *numberLazyAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *numberLazyAny) ToInt() int { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToInt32() int32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt32() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToInt64() int64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt64() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToUint() uint { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToUint32() uint32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint32() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToUint64() uint64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint64() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToFloat32() float32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat32() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToFloat64() float64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat64() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *numberLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *numberLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_object.go b/vendor/github.com/json-iterator/go/feature_any_object.go new file mode 100644 index 00000000000..c44ef5c989a --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_object.go @@ -0,0 +1,374 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type objectLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *objectLazyAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectLazyAny) MustBeValid() Any { + return any +} + +func (any *objectLazyAny) LastError() error { + return any.err +} + +func (any *objectLazyAny) ToBool() bool { + return true +} + +func (any *objectLazyAny) ToInt() int { + return 0 +} + +func (any *objectLazyAny) ToInt32() int32 { + return 0 +} + +func (any *objectLazyAny) ToInt64() int64 { + return 0 +} + +func (any *objectLazyAny) ToUint() uint { + return 0 +} + +func (any *objectLazyAny) ToUint32() uint32 { + return 0 +} + +func (any *objectLazyAny) ToUint64() uint64 { + return 0 +} + +func (any *objectLazyAny) ToFloat32() float32 { + return 0 +} + +func (any *objectLazyAny) ToFloat64() float64 { + return 0 +} + +func (any *objectLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *objectLazyAny) ToVal(obj interface{}) { + iter := 
any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(obj) +} + +func (any *objectLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateObjectField(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + mapped := locatePath(iter, path[1:]) + if mapped.ValueType() != InvalidValue { + mappedAll[field] = mapped + } + return true + }) + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectLazyAny) Keys() []string { + keys := []string{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + iter.Skip() + keys = append(keys, field) + return true + }) + return keys +} + +func (any *objectLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + size++ + return true + }) + return size +} + +func (any *objectLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *objectLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type objectAny struct { + baseAny + err error + val reflect.Value +} + +func wrapStruct(val interface{}) *objectAny { + return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *objectAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectAny) MustBeValid() Any { + return any +} + +func (any *objectAny) Parse() *Iterator { + return nil +} + +func (any *objectAny) LastError() error { + return any.err +} + +func (any *objectAny) ToBool() bool { + return any.val.NumField() != 0 +} + +func (any *objectAny) ToInt() int { + return 0 +} + +func (any *objectAny) ToInt32() int32 { + return 0 +} + +func (any *objectAny) ToInt64() int64 { + return 0 +} + +func (any *objectAny) ToUint() uint { + return 0 +} + +func (any *objectAny) ToUint32() uint32 { + return 0 +} + +func (any *objectAny) ToUint64() uint64 { + return 0 +} + +func (any *objectAny) ToFloat32() float32 { + return 0 +} + +func (any *objectAny) ToFloat64() float64 { + return 0 +} + +func (any *objectAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *objectAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + field := any.val.FieldByName(firstPath) + if !field.IsValid() { + return newInvalidAny(path) + } + return Wrap(field.Interface()) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for i := 0; i < any.val.NumField(); i++ { + field := any.val.Field(i) + if field.CanInterface() { + mapped := Wrap(field.Interface()).Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll[any.val.Type().Field(i).Name] = mapped + } + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectAny) Keys() []string { + keys := make([]string, 0, any.val.NumField()) + for i := 0; i < any.val.NumField(); i++ { + keys = append(keys, any.val.Type().Field(i).Name) + } + return keys +} + +func (any *objectAny) Size() int { + return any.val.NumField() +} + +func (any *objectAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *objectAny) GetInterface() interface{} { + return any.val.Interface() +} + +type mapAny struct { + baseAny + err error + val reflect.Value +} + +func wrapMap(val interface{}) *mapAny { + return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *mapAny) ValueType() ValueType { + return ObjectValue +} + +func (any *mapAny) MustBeValid() Any { + return any +} + +func (any *mapAny) Parse() *Iterator { + return nil +} + +func (any *mapAny) LastError() error { + return any.err +} + +func (any *mapAny) ToBool() bool { + return true +} + +func (any *mapAny) ToInt() int { + return 0 +} + +func (any *mapAny) ToInt32() int32 { + return 0 +} + +func (any *mapAny) ToInt64() int64 { + return 0 +} + +func (any *mapAny) ToUint() uint { + return 0 +} + +func (any *mapAny) ToUint32() uint32 { + return 0 +} + +func (any *mapAny) ToUint64() uint64 { + return 0 +} + +func (any *mapAny) ToFloat32() float32 { + return 0 +} + +func (any *mapAny) ToFloat64() float64 { + return 0 +} + +func (any *mapAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *mapAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for _, key := range any.val.MapKeys() { + keyAsStr := key.String() + element := Wrap(any.val.MapIndex(key).Interface()) + mapped := element.Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll[keyAsStr] = mapped + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + value := any.val.MapIndex(reflect.ValueOf(firstPath)) + if !value.IsValid() { + return newInvalidAny(path) + } + return Wrap(value.Interface()) + } +} + +func (any *mapAny) Keys() []string { + keys := make([]string, 0, any.val.Len()) + for _, key := range any.val.MapKeys() { + keys = append(keys, key.String()) + } + return keys +} + +func (any *mapAny) Size() int { + return any.val.Len() +} + +func (any *mapAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *mapAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_string.go b/vendor/github.com/json-iterator/go/feature_any_string.go new file mode 100644 index 00000000000..abf060bd59a --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_string.go @@ -0,0 +1,166 @@ +package jsoniter + +import ( + "fmt" + "strconv" +) + +type stringAny struct { + baseAny + val string +} + +func (any *stringAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} +} + +func (any *stringAny) Parse() *Iterator { + return nil +} + +func (any *stringAny) ValueType() ValueType { + return StringValue +} + +func (any *stringAny) MustBeValid() Any { + return any +} + +func (any *stringAny) LastError() error { + return nil +} + +func (any *stringAny) ToBool() bool { + str := any.ToString() + if str == "0" { + return false + } + for _, c := range str { + switch c { + case ' ', '\n', '\r', '\t': + default: + return true + } + } + return false +} + +func (any *stringAny) ToInt() int { + return int(any.ToInt64()) + +} + +func (any *stringAny) ToInt32() int32 { + return int32(any.ToInt64()) +} + +func (any *stringAny) ToInt64() int64 { + if any.val == "" { + return 0 + } + + flag := 1 + startPos := 0 + endPos := 0 + if any.val[0] == '+' || any.val[0] == '-' { + startPos = 1 + } + + if any.val[0] == '-' { + flag = -1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) + return int64(flag) * parsed +} + +func (any *stringAny) ToUint() uint { + return uint(any.ToUint64()) +} + +func (any *stringAny) ToUint32() uint32 { + return uint32(any.ToUint64()) +} + +func (any *stringAny) ToUint64() uint64 { + if any.val == "" { + return 0 + } + + startPos := 0 + endPos := 0 + + if any.val[0] == '-' { + return 0 + } + if any.val[0] == '+' { + startPos = 1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) + return parsed +} + +func (any *stringAny) ToFloat32() float32 { + return float32(any.ToFloat64()) +} + +func (any *stringAny) ToFloat64() float64 { + if len(any.val) == 0 { + return 0 + } + + // first char invalid + if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { + return 0 + } + + // extract valid num expression from string + // eg 123true => 123, -12.12xxa => -12.12 + endPos := 1 + for i := 1; i < len(any.val); i++ { + if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { + endPos = i + 1 + continue + } + + // end position is the first char which is not digit + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + endPos = i + break + } + } + parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) + return parsed +} + +func (any *stringAny) ToString() string { + return any.val +} + +func (any *stringAny) WriteTo(stream *Stream) { + stream.WriteString(any.val) +} + +func (any *stringAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_uint32.go b/vendor/github.com/json-iterator/go/feature_any_uint32.go new file mode 100644 index 00000000000..656bbd33d7e --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_uint32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint32Any struct { + baseAny + val uint32 +} + +func (any *uint32Any) LastError() error { + return nil +} + +func (any *uint32Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint32Any) MustBeValid() Any { + return any +} + +func (any *uint32Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint32Any) ToInt() int { + return int(any.val) +} + +func (any *uint32Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint32Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint32Any) ToUint32() uint32 { + return any.val +} + +func (any *uint32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *uint32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *uint32Any) WriteTo(stream *Stream) { + stream.WriteUint32(any.val) +} + +func (any *uint32Any) Parse() *Iterator { + return nil +} + +func (any *uint32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_uint64.go b/vendor/github.com/json-iterator/go/feature_any_uint64.go new file mode 100644 index 00000000000..7df2fce33ba --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_uint64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint64Any struct { + baseAny + val uint64 +} + +func (any *uint64Any) LastError() error { + return nil +} + +func (any *uint64Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint64Any) MustBeValid() Any { + return any +} + +func (any *uint64Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint64Any) ToInt() int { + return int(any.val) +} + +func (any *uint64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint64Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint64Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *uint64Any) ToUint64() uint64 { + return any.val +} + +func (any *uint64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint64Any) ToString() string { + return strconv.FormatUint(any.val, 10) +} + +func (any *uint64Any) WriteTo(stream *Stream) { + stream.WriteUint64(any.val) +} + +func (any *uint64Any) Parse() *Iterator { + return nil +} + +func (any *uint64Any) GetInterface() 
interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_config.go b/vendor/github.com/json-iterator/go/feature_config.go new file mode 100644 index 00000000000..fc055d504eb --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_config.go @@ -0,0 +1,312 @@ +package jsoniter + +import ( + "encoding/json" + "errors" + "io" + "reflect" + "sync/atomic" + "unsafe" +) + +// Config customize how the API should behave. +// The API is created from Config by Froze. +type Config struct { + IndentionStep int + MarshalFloatWith6Digits bool + EscapeHTML bool + SortMapKeys bool + UseNumber bool + TagKey string +} + +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + decoderCache unsafe.Pointer + encoderCache unsafe.Pointer + extensions []Extension + streamPool chan *Stream + iteratorPool chan *Iterator +} + +// API the public interface of this package. +// Primary Marshal and Unmarshal. +type API interface { + IteratorPool + StreamPool + MarshalToString(v interface{}) (string, error) + Marshal(v interface{}) ([]byte, error) + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + UnmarshalFromString(str string, v interface{}) error + Unmarshal(data []byte, v interface{}) error + Get(data []byte, path ...interface{}) Any + NewEncoder(writer io.Writer) *Encoder + NewDecoder(reader io.Reader) *Decoder +} + +// ConfigDefault the default API +var ConfigDefault = Config{ + EscapeHTML: true, +}.Froze() + +// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior +var ConfigCompatibleWithStandardLibrary = Config{ + EscapeHTML: true, + SortMapKeys: true, +}.Froze() + +// ConfigFastest marshals float with only 6 digits precision +var ConfigFastest = Config{ + EscapeHTML: false, + MarshalFloatWith6Digits: true, +}.Froze() + +// Froze forge API from config +func (cfg Config) Froze() API { + // TODO: cache frozen config + frozenConfig := &frozenConfig{ + sortMapKeys: cfg.SortMapKeys, + indentionStep: cfg.IndentionStep, + streamPool: make(chan *Stream, 16), + iteratorPool: make(chan *Iterator, 16), + } + atomic.StorePointer(&frozenConfig.decoderCache, unsafe.Pointer(&map[string]ValDecoder{})) + atomic.StorePointer(&frozenConfig.encoderCache, unsafe.Pointer(&map[string]ValEncoder{})) + if cfg.MarshalFloatWith6Digits { + frozenConfig.marshalFloatWith6Digits() + } + if cfg.EscapeHTML { + frozenConfig.escapeHTML() + } + if cfg.UseNumber { + frozenConfig.useNumber() + } + frozenConfig.configBeforeFrozen = cfg + return frozenConfig +} + +func (cfg *frozenConfig) useNumber() { + cfg.addDecoderToCache(reflect.TypeOf((*interface{})(nil)).Elem(), &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() == NumberValue { + *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) + } else { + *((*interface{})(ptr)) = iter.Read() + } + }}) +} +func (cfg *frozenConfig) getTagKey() string { + tagKey := cfg.configBeforeFrozen.TagKey + if tagKey == "" { + return "json" + } + return tagKey +} + +func (cfg *frozenConfig) registerExtension(extension Extension) { + cfg.extensions = append(cfg.extensions, extension) +} + +type lossyFloat32Encoder struct { +} + +func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32Lossy(*((*float32)(ptr))) +} + +func (encoder *lossyFloat32Encoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *lossyFloat32Encoder) IsEmpty(ptr 
unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type lossyFloat64Encoder struct { +} + +func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64Lossy(*((*float64)(ptr))) +} + +func (encoder *lossyFloat64Encoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +// EnableLossyFloatMarshalling keeps 10**(-6) precision +// for float variables for better performance. +func (cfg *frozenConfig) marshalFloatWith6Digits() { + // for better performance + cfg.addEncoderToCache(reflect.TypeOf((*float32)(nil)).Elem(), &lossyFloat32Encoder{}) + cfg.addEncoderToCache(reflect.TypeOf((*float64)(nil)).Elem(), &lossyFloat64Encoder{}) +} + +type htmlEscapedStringEncoder struct { +} + +func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteStringWithHTMLEscaped(str) +} + +func (encoder *htmlEscapedStringEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +func (cfg *frozenConfig) escapeHTML() { + cfg.addEncoderToCache(reflect.TypeOf((*string)(nil)).Elem(), &htmlEscapedStringEncoder{}) +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey reflect.Type, decoder ValDecoder) { + done := false + for !done { + ptr := atomic.LoadPointer(&cfg.decoderCache) + cache := *(*map[reflect.Type]ValDecoder)(ptr) + copied := map[reflect.Type]ValDecoder{} + for k, v := range cache { + copied[k] = v + } + copied[cacheKey] = decoder + done = atomic.CompareAndSwapPointer(&cfg.decoderCache, ptr, unsafe.Pointer(&copied)) + } +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey reflect.Type, encoder ValEncoder) { + done := false + for !done { + ptr := atomic.LoadPointer(&cfg.encoderCache) + cache := *(*map[reflect.Type]ValEncoder)(ptr) + copied := map[reflect.Type]ValEncoder{} + for k, v := range cache { + copied[k] = v + } + copied[cacheKey] = encoder + done = atomic.CompareAndSwapPointer(&cfg.encoderCache, ptr, unsafe.Pointer(&copied)) + } +} + +func (cfg *frozenConfig) getDecoderFromCache(cacheKey reflect.Type) ValDecoder { + ptr := atomic.LoadPointer(&cfg.decoderCache) + cache := *(*map[reflect.Type]ValDecoder)(ptr) + return cache[cacheKey] +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey reflect.Type) ValEncoder { + ptr := atomic.LoadPointer(&cfg.encoderCache) + cache := *(*map[reflect.Type]ValEncoder)(ptr) + return cache[cacheKey] +} + +func (cfg *frozenConfig) cleanDecoders() { + typeDecoders = map[string]ValDecoder{} + fieldDecoders = map[string]ValDecoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) cleanEncoders() { + typeEncoders = map[string]ValEncoder{} + fieldEncoders = map[string]ValEncoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return "", stream.Error + } + return string(stream.Buffer()), nil +} + +func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return nil, 
stream.Error + } + result := stream.Buffer() + copied := make([]byte, len(result)) + copy(copied, result) + return copied, nil +} + +func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + if prefix != "" { + panic("prefix is not supported") + } + for _, r := range indent { + if r != ' ' { + panic("indent can only be space") + } + } + newCfg := cfg.configBeforeFrozen + newCfg.IndentionStep = len(indent) + return newCfg.Froze().Marshal(v) +} + +func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { + data := []byte(str) + data = data[:lastNotSpacePos(data)] + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + if iter.head == iter.tail { + iter.loadMore() + } + if iter.Error == io.EOF { + return nil + } + if iter.Error == nil { + iter.ReportError("UnmarshalFromString", "there are bytes left after unmarshal") + } + return iter.Error +} + +func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + return locatePath(iter, path) +} + +func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { + data = data[:lastNotSpacePos(data)] + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + typ := reflect.TypeOf(v) + if typ.Kind() != reflect.Ptr { + // return non-pointer error + return errors.New("the second param must be ptr type") + } + iter.ReadVal(v) + if iter.head == iter.tail { + iter.loadMore() + } + if iter.Error == io.EOF { + return nil + } + if iter.Error == nil { + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + } + return iter.Error +} + +func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { + stream := NewStream(cfg, writer, 512) + return &Encoder{stream} +} + +func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { + iter := Parse(cfg, reader, 512) + return &Decoder{iter} +} diff --git a/vendor/github.com/json-iterator/go/feature_iter.go b/vendor/github.com/json-iterator/go/feature_iter.go new file mode 100644 index 00000000000..4357d69bac7 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter.go @@ -0,0 +1,307 @@ +package jsoniter + +import ( + "encoding/json" + "fmt" + "io" +) + +// ValueType the type for JSON element +type ValueType int + +const ( + // InvalidValue invalid JSON element + InvalidValue ValueType = iota + // StringValue JSON element "string" + StringValue + // NumberValue JSON element 100 or 0.10 + NumberValue + // NilValue JSON element null + NilValue + // BoolValue JSON element true or false + BoolValue + // ArrayValue JSON element [] + ArrayValue + // ObjectValue JSON element {} + ObjectValue +) + +var hexDigits []byte +var valueTypes []ValueType + +func init() { + hexDigits = make([]byte, 256) + for i := 0; i < len(hexDigits); i++ { + hexDigits[i] = 255 + } + for i := '0'; i <= '9'; i++ { + hexDigits[i] = byte(i - '0') + } + for i := 'a'; i <= 'f'; i++ { + hexDigits[i] = byte((i - 'a') + 10) + } + for i := 'A'; i <= 'F'; i++ { + hexDigits[i] = byte((i - 'A') + 10) + } + valueTypes = make([]ValueType, 256) + for i := 0; i < len(valueTypes); i++ { + valueTypes[i] = InvalidValue + } + valueTypes['"'] = StringValue + valueTypes['-'] = NumberValue + valueTypes['0'] = NumberValue + valueTypes['1'] = NumberValue + valueTypes['2'] = NumberValue + valueTypes['3'] = NumberValue + valueTypes['4'] = NumberValue + valueTypes['5'] = NumberValue + valueTypes['6'] = NumberValue + valueTypes['7'] = NumberValue + 
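Stepping back to the Config plumbing defined in feature_config.go above: Froze turns a Config value into an immutable API, and the three prebuilt configs (ConfigDefault, ConfigCompatibleWithStandardLibrary, ConfigFastest) cover the common trade-offs. A small usage sketch with invented sample data, assuming the usual jsoniter import:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	api := jsoniter.Config{EscapeHTML: true, SortMapKeys: true}.Froze()

	out, _ := api.Marshal(map[string]int{"b": 2, "a": 1})
	fmt.Println(string(out)) // {"a":1,"b":2} because SortMapKeys is set

	var v struct{ A int }
	if err := api.UnmarshalFromString(`{"A": 41}`, &v); err != nil {
		panic(err)
	}

	// MarshalIndent accepts only an empty prefix and space indentation;
	// the checks above panic on anything else.
	pretty, _ := api.MarshalIndent(v, "", "  ")
	fmt.Println(string(pretty))
}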
valueTypes['8'] = NumberValue + valueTypes['9'] = NumberValue + valueTypes['t'] = BoolValue + valueTypes['f'] = BoolValue + valueTypes['n'] = NilValue + valueTypes['['] = ArrayValue + valueTypes['{'] = ObjectValue +} + +// Iterator is a io.Reader like object, with JSON specific read functions. +// Error is not returned as return value, but stored as Error member on this iterator instance. +type Iterator struct { + cfg *frozenConfig + reader io.Reader + buf []byte + head int + tail int + captureStartedAt int + captured []byte + Error error +} + +// NewIterator creates an empty Iterator instance +func NewIterator(cfg API) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: nil, + head: 0, + tail: 0, + } +} + +// Parse creates an Iterator instance from io.Reader +func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: reader, + buf: make([]byte, bufSize), + head: 0, + tail: 0, + } +} + +// ParseBytes creates an Iterator instance from byte array +func ParseBytes(cfg API, input []byte) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: input, + head: 0, + tail: len(input), + } +} + +// ParseString creates an Iterator instance from string +func ParseString(cfg API, input string) *Iterator { + return ParseBytes(cfg, []byte(input)) +} + +// Pool returns a pool can provide more iterator with same configuration +func (iter *Iterator) Pool() IteratorPool { + return iter.cfg +} + +// Reset reuse iterator instance by specifying another reader +func (iter *Iterator) Reset(reader io.Reader) *Iterator { + iter.reader = reader + iter.head = 0 + iter.tail = 0 + return iter +} + +// ResetBytes reuse iterator instance by specifying another byte array as input +func (iter *Iterator) ResetBytes(input []byte) *Iterator { + iter.reader = nil + iter.buf = input + iter.head = 0 + iter.tail = len(input) + return iter +} + +// WhatIsNext gets ValueType of relatively next json element +func (iter *Iterator) WhatIsNext() ValueType { + valueType := valueTypes[iter.nextToken()] + iter.unreadByte() + return valueType +} + +func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + return false + } + return true +} + +func (iter *Iterator) isObjectEnd() bool { + c := iter.nextToken() + if c == ',' { + return false + } + if c == '}' { + return true + } + iter.ReportError("isObjectEnd", "object ended prematurely") + return true +} + +func (iter *Iterator) nextToken() byte { + // a variation of skip whitespaces, returning the next non-whitespace token + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + 1 + return c + } + if !iter.loadMore() { + return 0 + } + } +} + +// ReportError record a error in iterator instance with current position. +func (iter *Iterator) ReportError(operation string, msg string) { + if iter.Error != nil { + if iter.Error != io.EOF { + return + } + } + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + iter.Error = fmt.Errorf("%s: %s, parsing %v ...%s... 
at %s", operation, msg, iter.head, + string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) +} + +// CurrentBuffer gets current buffer as string for debugging purpose +func (iter *Iterator) CurrentBuffer() string { + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + return fmt.Sprintf("parsing %v ...|%s|... at %s", iter.head, + string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) +} + +func (iter *Iterator) readByte() (ret byte) { + if iter.head == iter.tail { + if iter.loadMore() { + ret = iter.buf[iter.head] + iter.head++ + return ret + } + return 0 + } + ret = iter.buf[iter.head] + iter.head++ + return ret +} + +func (iter *Iterator) loadMore() bool { + if iter.reader == nil { + if iter.Error == nil { + iter.head = iter.tail + iter.Error = io.EOF + } + return false + } + if iter.captured != nil { + iter.captured = append(iter.captured, + iter.buf[iter.captureStartedAt:iter.tail]...) + iter.captureStartedAt = 0 + } + for { + n, err := iter.reader.Read(iter.buf) + if n == 0 { + if err != nil { + if iter.Error == nil { + iter.Error = err + } + return false + } + } else { + iter.head = 0 + iter.tail = n + return true + } + } +} + +func (iter *Iterator) unreadByte() { + if iter.Error != nil { + return + } + iter.head-- + return +} + +// Read read the next JSON element as generic interface{}. +func (iter *Iterator) Read() interface{} { + valueType := iter.WhatIsNext() + switch valueType { + case StringValue: + return iter.ReadString() + case NumberValue: + if iter.cfg.configBeforeFrozen.UseNumber { + return json.Number(iter.readNumberAsString()) + } + return iter.ReadFloat64() + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + return nil + case BoolValue: + return iter.ReadBool() + case ArrayValue: + arr := []interface{}{} + iter.ReadArrayCB(func(iter *Iterator) bool { + var elem interface{} + iter.ReadVal(&elem) + arr = append(arr, elem) + return true + }) + return arr + case ObjectValue: + obj := map[string]interface{}{} + iter.ReadMapCB(func(Iter *Iterator, field string) bool { + var elem interface{} + iter.ReadVal(&elem) + obj[field] = elem + return true + }) + return obj + default: + iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) + return nil + } +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_array.go b/vendor/github.com/json-iterator/go/feature_iter_array.go new file mode 100644 index 00000000000..cbc3ec8d16a --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_array.go @@ -0,0 +1,58 @@ +package jsoniter + +// ReadArray read array element, tells if the array has more element to read. 
+func (iter *Iterator) ReadArray() (ret bool) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return false // null + case '[': + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + return true + } + return false + case ']': + return false + case ',': + return true + default: + iter.ReportError("ReadArray", "expect [ or , or ] or n, but found: "+string([]byte{c})) + return + } +} + +// ReadArrayCB read array with callback +func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { + c := iter.nextToken() + if c == '[' { + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + if !callback(iter) { + return false + } + c = iter.nextToken() + for c == ',' { + if !callback(iter) { + return false + } + c = iter.nextToken() + } + if c != ']' { + iter.ReportError("ReadArrayCB", "expect ] in the end") + return false + } + return true + } + return true + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadArrayCB", "expect [ or n, but found: "+string([]byte{c})) + return false +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_float.go b/vendor/github.com/json-iterator/go/feature_iter_float.go new file mode 100644 index 00000000000..86f45991224 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_float.go @@ -0,0 +1,341 @@ +package jsoniter + +import ( + "io" + "math/big" + "strconv" + "strings" + "unsafe" +) + +var floatDigits []int8 + +const invalidCharForNumber = int8(-1) +const endOfNumber = int8(-2) +const dotInNumber = int8(-3) + +func init() { + floatDigits = make([]int8, 256) + for i := 0; i < len(floatDigits); i++ { + floatDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + floatDigits[i] = i - int8('0') + } + floatDigits[','] = endOfNumber + floatDigits[']'] = endOfNumber + floatDigits['}'] = endOfNumber + floatDigits[' '] = endOfNumber + floatDigits['\t'] = endOfNumber + floatDigits['\n'] = endOfNumber + floatDigits['.'] = dotInNumber +} + +// ReadBigFloat read big.Float +func (iter *Iterator) ReadBigFloat() (ret *big.Float) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + prec := 64 + if len(str) > prec { + prec = len(str) + } + val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) + if err != nil { + iter.Error = err + return nil + } + return val +} + +// ReadBigInt read big.Int +func (iter *Iterator) ReadBigInt() (ret *big.Int) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + ret = big.NewInt(0) + var success bool + ret, success = ret.SetString(str, 10) + if !success { + iter.ReportError("ReadBigInt", "invalid big int") + return nil + } + return ret +} + +//ReadFloat32 read float32 +func (iter *Iterator) ReadFloat32() (ret float32) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat32() + } + iter.unreadByte() + return iter.readPositiveFloat32() +} + +func (iter *Iterator) readPositiveFloat32() (ret float32) { + value := uint64(0) + c := byte(' ') + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c = iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.ReportError("readFloat32", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat32", "leading dot is invalid") + return + case 0: + if i == iter.tail { + 
return iter.readFloat32SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat32", "leading zero is invalid") + return + } + } + value = uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.head = i + return float32(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat32SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float32(float64(value) / float64(pow10[decimalPlaces])) + } + // too many decimal places + return iter.readFloat32SlowPath() + case invalidCharForNumber: + fallthrough + case dotInNumber: + return iter.readFloat32SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat32SlowPath() +} + +func (iter *Iterator) readNumberAsString() (ret string) { + strBuf := [16]byte{} + str := strBuf[0:0] +load_loop: + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + str = append(str, c) + continue + default: + iter.head = i + break load_loop + } + } + if !iter.loadMore() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + return + } + if len(str) == 0 { + iter.ReportError("readNumberAsString", "invalid number") + } + return *(*string)(unsafe.Pointer(&str)) +} + +func (iter *Iterator) readFloat32SlowPath() (ret float32) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat32SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 32) + if err != nil { + iter.Error = err + return + } + return float32(val) +} + +// ReadFloat64 read float64 +func (iter *Iterator) ReadFloat64() (ret float64) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat64() + } + iter.unreadByte() + return iter.readPositiveFloat64() +} + +func (iter *Iterator) readPositiveFloat64() (ret float64) { + value := uint64(0) + c := byte(' ') + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.ReportError("readFloat64", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat64", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat64", "leading zero is invalid") + return + } + } + value = uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + 
return iter.readFloat64SlowPath() + case endOfNumber: + iter.head = i + return float64(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat64SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float64(value) / float64(pow10[decimalPlaces]) + } + // too many decimal places + return iter.readFloat64SlowPath() + case invalidCharForNumber: + fallthrough + case dotInNumber: + return iter.readFloat64SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat64SlowPath() +} + +func (iter *Iterator) readFloat64SlowPath() (ret float64) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat64SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 64) + if err != nil { + iter.Error = err + return + } + return val +} + +func validateFloat(str string) string { + // strconv.ParseFloat is not validating `1.` or `1.e1` + if len(str) == 0 { + return "empty number" + } + if str[0] == '-' { + return "-- is not valid" + } + dotPos := strings.IndexByte(str, '.') + if dotPos != -1 { + if dotPos == len(str)-1 { + return "dot can not be last character" + } + switch str[dotPos+1] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + return "missing digit after dot" + } + } + return "" +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_int.go b/vendor/github.com/json-iterator/go/feature_iter_int.go new file mode 100644 index 00000000000..886879efdbb --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_int.go @@ -0,0 +1,258 @@ +package jsoniter + +import ( + "math" + "strconv" +) + +var intDigits []int8 + +const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 +const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 + +func init() { + intDigits = make([]int8, 256) + for i := 0; i < len(intDigits); i++ { + intDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + intDigits[i] = i - int8('0') + } +} + +// ReadUint read uint +func (iter *Iterator) ReadUint() uint { + return uint(iter.ReadUint64()) +} + +// ReadInt read int +func (iter *Iterator) ReadInt() int { + return int(iter.ReadInt64()) +} + +// ReadInt8 read int8 +func (iter *Iterator) ReadInt8() (ret int8) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt8+1 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int8(val) + } + val := iter.readUint32(c) + if val > math.MaxInt8 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int8(val) +} + +// ReadUint8 read uint8 +func (iter *Iterator) ReadUint8() (ret uint8) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint8 { + iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint8(val) +} + +// ReadInt16 read int16 +func (iter 
*Iterator) ReadInt16() (ret int16) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt16+1 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int16(val) + } + val := iter.readUint32(c) + if val > math.MaxInt16 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int16(val) +} + +// ReadUint16 read uint16 +func (iter *Iterator) ReadUint16() (ret uint16) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint16 { + iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint16(val) +} + +// ReadInt32 read int32 +func (iter *Iterator) ReadInt32() (ret int32) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt32+1 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int32(val) + } + val := iter.readUint32(c) + if val > math.MaxInt32 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int32(val) +} + +// ReadUint32 read uint32 +func (iter *Iterator) ReadUint32() (ret uint32) { + return iter.readUint32(iter.nextToken()) +} + +func (iter *Iterator) readUint32(c byte) (ret uint32) { + ind := intDigits[c] + if ind == 0 { + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint32(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + return value*10 + uint32(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + return value*100 + uint32(ind2)*10 + uint32(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + return value + } + if value > uint32SafeToMultiply10 { + value2 := (value << 3) + (value << 1) + uint32(ind) + if value2 < value { + iter.ReportError("readUint32", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) 
+ (value << 1) + uint32(ind) + } + if !iter.loadMore() { + return value + } + } +} + +// ReadInt64 read int64 +func (iter *Iterator) ReadInt64() (ret int64) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint64(iter.readByte()) + if val > math.MaxInt64+1 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return -int64(val) + } + val := iter.readUint64(c) + if val > math.MaxInt64 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return int64(val) +} + +// ReadUint64 read uint64 +func (iter *Iterator) ReadUint64() uint64 { + return iter.readUint64(iter.nextToken()) +} + +func (iter *Iterator) readUint64(c byte) (ret uint64) { + ind := intDigits[c] + if ind == 0 { + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint64(ind) + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + return value + } + if value > uint64SafeToMultiple10 { + value2 := (value << 3) + (value << 1) + uint64(ind) + if value2 < value { + iter.ReportError("readUint64", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint64(ind) + } + if !iter.loadMore() { + return value + } + } +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_object.go b/vendor/github.com/json-iterator/go/feature_iter_object.go new file mode 100644 index 00000000000..3bdb5576eed --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_object.go @@ -0,0 +1,212 @@ +package jsoniter + +import ( + "fmt" + "unicode" + "unsafe" +) + +// ReadObject read one field from object. +// If object ended, returns empty string. +// Otherwise, returns the field name. 
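ReadObject below supports a pull-style loop: each call returns the next field name with its ':' already consumed, and an empty string once the object ends (or was null). A minimal sketch, under the same import assumption and with invented input:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	it := jsoniter.ParseString(jsoniter.ConfigDefault, `{"x":1,"y":2}`)

	sum := 0
	for field := it.ReadObject(); field != ""; field = it.ReadObject() {
		sum += it.ReadInt() // the value belonging to field ("x", then "y")
	}
	fmt.Println(sum, it.Error) // 3 <nil>
}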
+func (iter *Iterator) ReadObject() (ret string) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return "" // null + case '{': + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + return string(iter.readObjectFieldAsBytes()) + } + if c == '}' { + return "" // end of object + } + iter.ReportError("ReadObject", `expect " after {`) + return + case ',': + return string(iter.readObjectFieldAsBytes()) + case '}': + return "" // end of object + default: + iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) + return + } +} + +func (iter *Iterator) readFieldHash() int32 { + hash := int64(0x811c9dc5) + c := iter.nextToken() + if c == '"' { + for { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + b := iter.buf[i] + if 'A' <= b && b <= 'Z' { + b += 'a' - 'A' + } + if b == '"' { + iter.head = i + 1 + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + } + return int32(hash) + } + hash ^= int64(b) + hash *= 0x1000193 + } + if !iter.loadMore() { + iter.ReportError("readFieldHash", `incomplete field name`) + return 0 + } + } + } + iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) + return 0 +} + +func calcHash(str string) int32 { + hash := int64(0x811c9dc5) + for _, b := range str { + hash ^= int64(unicode.ToLower(b)) + hash *= 0x1000193 + } + return int32(hash) +} + +// ReadObjectCB read object with callback, the key is ascii only and field name not copied +func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.readObjectFieldAsBytes() + if !callback(iter, *(*string)(unsafe.Pointer(&field))) { + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.readObjectFieldAsBytes() + if !callback(iter, *(*string)(unsafe.Pointer(&field))) { + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadObjectCB", `object not ended with }`) + return false + } + return true + } + if c == '}' { + return true + } + iter.ReportError("ReadObjectCB", `expect " after }`) + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadObjectCB", `expect { or n`) + return false +} + +// ReadMapCB read map with callback, the key can be any string +func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field") + return false + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field") + return false + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadMapCB", `object not ended with }`) + return false + } + return true + } + if c == '}' { + return true + } + iter.ReportError("ReadMapCB", `expect " after }`) + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadMapCB", `expect { or n`) + return false +} + +func (iter *Iterator) readObjectStart() bool { 
+ c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '}' { + return false + } + iter.unreadByte() + return true + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return false + } + iter.ReportError("readObjectStart", "expect { or n") + return false +} + +func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { + str := iter.ReadStringAsSlice() + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if iter.buf[iter.head] != ':' { + iter.ReportError("readObjectFieldAsBytes", "expect : after object field") + return + } + iter.head++ + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if ret == nil { + return str + } + return ret +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip.go b/vendor/github.com/json-iterator/go/feature_iter_skip.go new file mode 100644 index 00000000000..b008d98c99a --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_skip.go @@ -0,0 +1,127 @@ +package jsoniter + +import "fmt" + +// ReadNil reads a json object as nil and +// returns whether it's a nil or not +func (iter *Iterator) ReadNil() (ret bool) { + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') // null + return true + } + iter.unreadByte() + return false +} + +// ReadBool reads a json object as BoolValue +func (iter *Iterator) ReadBool() (ret bool) { + c := iter.nextToken() + if c == 't' { + iter.skipThreeBytes('r', 'u', 'e') + return true + } + if c == 'f' { + iter.skipFourBytes('a', 'l', 's', 'e') + return false + } + iter.ReportError("ReadBool", "expect t or f") + return +} + +// SkipAndReturnBytes skip next JSON element, and return its content as []byte. +// The []byte can be kept, it is a copy of data. +func (iter *Iterator) SkipAndReturnBytes() []byte { + iter.startCapture(iter.head) + iter.Skip() + return iter.stopCapture() +} + +type captureBuffer struct { + startedAt int + captured []byte +} + +func (iter *Iterator) startCapture(captureStartedAt int) { + if iter.captured != nil { + panic("already in capture mode") + } + iter.captureStartedAt = captureStartedAt + iter.captured = make([]byte, 0, 32) +} + +func (iter *Iterator) stopCapture() []byte { + if iter.captured == nil { + panic("not in capture mode") + } + captured := iter.captured + remaining := iter.buf[iter.captureStartedAt:iter.head] + iter.captureStartedAt = -1 + iter.captured = nil + if len(captured) == 0 { + return remaining + } + captured = append(captured, remaining...) 
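ReadMapCB above streams an object field by field, and SkipAndReturnBytes (built on the capture machinery here) hands back the raw bytes of whatever value it skips, which is handy for deferring part of a document. A sketch under the same import assumption, with invented input:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	it := jsoniter.ParseString(jsoniter.ConfigDefault, `{"keep":{"x":[1,2]},"rest":0}`)

	var raw []byte
	it.ReadMapCB(func(it *jsoniter.Iterator, field string) bool {
		if field == "keep" {
			raw = it.SkipAndReturnBytes() // raw bytes of the skipped value
		} else {
			it.Skip()
		}
		return true
	})
	fmt.Println(string(raw), it.Error) // {"x":[1,2]} <nil>
}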
+ return captured +} + +// Skip skips a json object and positions to relatively the next json object +func (iter *Iterator) Skip() { + c := iter.nextToken() + switch c { + case '"': + iter.skipString() + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + case '0': + iter.unreadByte() + iter.ReadFloat32() + case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.skipNumber() + case '[': + iter.skipArray() + case '{': + iter.skipObject() + default: + iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) + return + } +} + +func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b4 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } +} + +func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go new file mode 100644 index 00000000000..047d58a4bc9 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go @@ -0,0 +1,144 @@ +//+build jsoniter-sloppy + +package jsoniter + +// sloppy but faster implementation, do not validate the input json + +func (iter *Iterator) skipNumber() { + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\r', '\t', ',', '}', ']': + iter.head = i + return + } + } + if !iter.loadMore() { + return + } + } +} + +func (iter *Iterator) skipArray() { + level := 1 + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '[': // If open symbol, increase level + level++ + case ']': // If close symbol, increase level + level-- + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete array") + return + } + } +} + +func (iter *Iterator) skipObject() { + level := 1 + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '{': // If open symbol, increase level + level++ + case '}': // If close symbol, increase level + level-- + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete object") + return + } + 
} +} + +func (iter *Iterator) skipString() { + for { + end, escaped := iter.findStringEnd() + if end == -1 { + if !iter.loadMore() { + iter.ReportError("skipString", "incomplete string") + return + } + if escaped { + iter.head = 1 // skip the first char as last char read is \ + } + } else { + iter.head = end + return + } + } +} + +// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go +// Tries to find the end of string +// Support if string contains escaped quote symbols. +func (iter *Iterator) findStringEnd() (int, bool) { + escaped := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + if !escaped { + return i + 1, false + } + j := i - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return i + 1, true + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + } + } else if c == '\\' { + escaped = true + } + } + j := iter.tail - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return -1, false // do not end with \ + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + + } + return -1, true // end with \ +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go b/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go new file mode 100644 index 00000000000..d2676382540 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go @@ -0,0 +1,89 @@ +//+build !jsoniter-sloppy + +package jsoniter + +import "fmt" + +func (iter *Iterator) skipNumber() { + if !iter.trySkipNumber() { + iter.unreadByte() + iter.ReadFloat32() + } +} + +func (iter *Iterator) trySkipNumber() bool { + dotFound := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + case '.': + if dotFound { + iter.ReportError("validateNumber", `more than one dot found in number`) + return true // already failed + } + if i+1 == iter.tail { + return false + } + c = iter.buf[i+1] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + iter.ReportError("validateNumber", `missing digit after dot`) + return true // already failed + } + dotFound = true + default: + switch c { + case ',', ']', '}', ' ', '\t', '\n', '\r': + if iter.head == i { + return false // if - without following digits + } + iter.head = i + return true // must be valid + } + return false // may be invalid + } + } + return false +} + +func (iter *Iterator) skipString() { + if !iter.trySkipString() { + iter.unreadByte() + iter.ReadString() + } +} + +func (iter *Iterator) trySkipString() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + iter.head = i + 1 + return true // valid + } else if c == '\\' { + return false + } else if c < ' ' { + iter.ReportError("ReadString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return true // already failed + } + } + return false +} + +func (iter *Iterator) skipObject() { + iter.unreadByte() + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + return true + }) +} + +func (iter *Iterator) skipArray() { + iter.unreadByte() + iter.ReadArrayCB(func(iter *Iterator) bool { + iter.Skip() + return true + }) +} diff --git 
a/vendor/github.com/json-iterator/go/feature_iter_string.go b/vendor/github.com/json-iterator/go/feature_iter_string.go new file mode 100644 index 00000000000..b764600460e --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_string.go @@ -0,0 +1,215 @@ +package jsoniter + +import ( + "fmt" + "unicode/utf16" +) + +// ReadString read string from iterator +func (iter *Iterator) ReadString() (ret string) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + ret = string(iter.buf[iter.head:i]) + iter.head = i + 1 + return ret + } else if c == '\\' { + break + } else if c < ' ' { + iter.ReportError("ReadString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return + } + } + return iter.readStringSlowPath() + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return "" + } + iter.ReportError("ReadString", `expects " or n`) + return +} + +func (iter *Iterator) readStringSlowPath() (ret string) { + var str []byte + var c byte + for iter.Error == nil { + c = iter.readByte() + if c == '"' { + return string(str) + } + if c == '\\' { + c = iter.readByte() + str = iter.readEscapedChar(c, str) + } else { + str = append(str, c) + } + } + iter.ReportError("ReadString", "unexpected end of input") + return +} + +func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { + switch c { + case 'u': + r := iter.readU4() + if utf16.IsSurrogate(r) { + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != '\\' { + iter.unreadByte() + str = appendRune(str, r) + return str + } + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != 'u' { + str = appendRune(str, r) + return iter.readEscapedChar(c, str) + } + r2 := iter.readU4() + if iter.Error != nil { + return nil + } + combined := utf16.DecodeRune(r, r2) + if combined == '\uFFFD' { + str = appendRune(str, r) + str = appendRune(str, r2) + } else { + str = appendRune(str, combined) + } + } else { + str = appendRune(str, r) + } + case '"': + str = append(str, '"') + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + default: + iter.ReportError("ReadString", + `invalid escape char after \`) + return nil + } + return str +} + +// ReadStringAsSlice read string from iterator without copying into string form. +// The []byte can not be kept, as it will change after next iterator call. 
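ReadString above takes a fast path while the bytes are plain and drops into readStringSlowPath on the first backslash, where readEscapedChar resolves \uXXXX escapes, including surrogate pairs. A small sketch, same import assumption and invented input:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	it := jsoniter.ParseString(jsoniter.ConfigDefault, `"caf\u00e9\n\ud83d\ude00"`)
	// The trailing surrogate pair decodes to a single rune (U+1F600).
	fmt.Printf("%q %v\n", it.ReadString(), it.Error) // "café\n😀" <nil>
}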
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + // for: field name, base64, number + if iter.buf[i] == '"' { + // fast path: reuse the underlying buffer + ret = iter.buf[iter.head:i] + iter.head = i + 1 + return ret + } + } + readLen := iter.tail - iter.head + copied := make([]byte, readLen, readLen*2) + copy(copied, iter.buf[iter.head:iter.tail]) + iter.head = iter.tail + for iter.Error == nil { + c := iter.readByte() + if c == '"' { + return copied + } + copied = append(copied, c) + } + return copied + } + iter.ReportError("ReadString", `expects " or n`) + return +} + +func (iter *Iterator) readU4() (ret rune) { + for i := 0; i < 4; i++ { + c := iter.readByte() + if iter.Error != nil { + return + } + if c >= '0' && c <= '9' { + ret = ret*16 + rune(c-'0') + } else if c >= 'a' && c <= 'f' { + ret = ret*16 + rune(c-'a'+10) + } else if c >= 'A' && c <= 'F' { + ret = ret*16 + rune(c-'A'+10) + } else { + iter.ReportError("readU4", "expects 0~9 or a~f") + return + } + } + return ret +} + +const ( + t1 = 0x00 // 0000 0000 + tx = 0x80 // 1000 0000 + t2 = 0xC0 // 1100 0000 + t3 = 0xE0 // 1110 0000 + t4 = 0xF0 // 1111 0000 + t5 = 0xF8 // 1111 1000 + + maskx = 0x3F // 0011 1111 + mask2 = 0x1F // 0001 1111 + mask3 = 0x0F // 0000 1111 + mask4 = 0x07 // 0000 0111 + + rune1Max = 1<<7 - 1 + rune2Max = 1<<11 - 1 + rune3Max = 1<<16 - 1 + + surrogateMin = 0xD800 + surrogateMax = 0xDFFF + + maxRune = '\U0010FFFF' // Maximum valid Unicode code point. + runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" +) + +func appendRune(p []byte, r rune) []byte { + // Negative values are erroneous. Making it unsigned addresses the problem. 
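+	// The cases below follow standard UTF-8 encoding: one byte for runes below
+	// 0x80, two bytes below 0x800, three bytes below 0x10000 (surrogates and
+	// out-of-range values are first replaced with U+FFFD), and four bytes otherwise.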
+ switch i := uint32(r); { + case i <= rune1Max: + p = append(p, byte(r)) + return p + case i <= rune2Max: + p = append(p, t2|byte(r>>6)) + p = append(p, tx|byte(r)&maskx) + return p + case i > maxRune, surrogateMin <= i && i <= surrogateMax: + r = runeError + fallthrough + case i <= rune3Max: + p = append(p, t3|byte(r>>12)) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + default: + p = append(p, t4|byte(r>>18)) + p = append(p, tx|byte(r>>12)&maskx) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + } +} diff --git a/vendor/github.com/json-iterator/go/feature_json_number.go b/vendor/github.com/json-iterator/go/feature_json_number.go new file mode 100644 index 00000000000..0439f672528 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_json_number.go @@ -0,0 +1,15 @@ +package jsoniter + +import "encoding/json" + +type Number string + +func CastJsonNumber(val interface{}) (string, bool) { + switch typedVal := val.(type) { + case json.Number: + return string(typedVal), true + case Number: + return string(typedVal), true + } + return "", false +} diff --git a/vendor/github.com/json-iterator/go/feature_pool.go b/vendor/github.com/json-iterator/go/feature_pool.go new file mode 100644 index 00000000000..73962bc6f6c --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_pool.go @@ -0,0 +1,57 @@ +package jsoniter + +import ( + "io" +) + +// IteratorPool a thread safe pool of iterators with same configuration +type IteratorPool interface { + BorrowIterator(data []byte) *Iterator + ReturnIterator(iter *Iterator) +} + +// StreamPool a thread safe pool of streams with same configuration +type StreamPool interface { + BorrowStream(writer io.Writer) *Stream + ReturnStream(stream *Stream) +} + +func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { + select { + case stream := <-cfg.streamPool: + stream.Reset(writer) + return stream + default: + return NewStream(cfg, writer, 512) + } +} + +func (cfg *frozenConfig) ReturnStream(stream *Stream) { + stream.Error = nil + select { + case cfg.streamPool <- stream: + return + default: + return + } +} + +func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { + select { + case iter := <-cfg.iteratorPool: + iter.ResetBytes(data) + return iter + default: + return ParseBytes(cfg, data) + } +} + +func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { + iter.Error = nil + select { + case cfg.iteratorPool <- iter: + return + default: + return + } +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect.go b/vendor/github.com/json-iterator/go/feature_reflect.go new file mode 100644 index 00000000000..05d91b49c8b --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect.go @@ -0,0 +1,691 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "fmt" + "reflect" + "time" + "unsafe" +) + +// ValDecoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValDecoder with json.Decoder. +// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). +// +// Reflection on type to create decoders, which is then cached +// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions +// 1. create instance of new value, for example *int will need a int to be allocated +// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New +// 3. 
assignment to map, both key and value will be reflect.Value +// For a simple struct binding, it will be reflect.Value free and allocation free +type ValDecoder interface { + Decode(ptr unsafe.Pointer, iter *Iterator) +} + +// ValEncoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValEncoder with json.Encoder. +// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). +type ValEncoder interface { + IsEmpty(ptr unsafe.Pointer) bool + Encode(ptr unsafe.Pointer, stream *Stream) + EncodeInterface(val interface{}, stream *Stream) +} + +type checkIsEmpty interface { + IsEmpty(ptr unsafe.Pointer) bool +} + +// WriteToStream the default implementation for TypeEncoder method EncodeInterface +func WriteToStream(val interface{}, stream *Stream, encoder ValEncoder) { + e := (*emptyInterface)(unsafe.Pointer(&val)) + if e.word == nil { + stream.WriteNil() + return + } + if reflect.TypeOf(val).Kind() == reflect.Ptr { + encoder.Encode(unsafe.Pointer(&e.word), stream) + } else { + encoder.Encode(e.word, stream) + } +} + +var jsonNumberType reflect.Type +var jsoniterNumberType reflect.Type +var jsonRawMessageType reflect.Type +var jsoniterRawMessageType reflect.Type +var anyType reflect.Type +var marshalerType reflect.Type +var unmarshalerType reflect.Type +var textMarshalerType reflect.Type +var textUnmarshalerType reflect.Type + +func init() { + jsonNumberType = reflect.TypeOf((*json.Number)(nil)).Elem() + jsoniterNumberType = reflect.TypeOf((*Number)(nil)).Elem() + jsonRawMessageType = reflect.TypeOf((*json.RawMessage)(nil)).Elem() + jsoniterRawMessageType = reflect.TypeOf((*RawMessage)(nil)).Elem() + anyType = reflect.TypeOf((*Any)(nil)).Elem() + marshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +} + +type optionalDecoder struct { + valueType reflect.Type + valueDecoder ValDecoder +} + +func (decoder *optionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*unsafe.Pointer)(ptr)) = nil + } else { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + value := reflect.New(decoder.valueType) + newPtr := extractInterface(value.Interface()).word + decoder.valueDecoder.Decode(newPtr, iter) + *((*uintptr)(ptr)) = uintptr(newPtr) + } else { + //reuse existing instance + decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } + } +} + +type deferenceDecoder struct { + // only to deference a pointer + valueType reflect.Type + valueDecoder ValDecoder +} + +func (decoder *deferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + value := reflect.New(decoder.valueType) + newPtr := extractInterface(value.Interface()).word + decoder.valueDecoder.Decode(newPtr, iter) + *((*uintptr)(ptr)) = uintptr(newPtr) + } else { + //reuse existing instance + decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } +} + +type optionalEncoder struct { + valueEncoder ValEncoder +} + +func (encoder *optionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.valueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *optionalEncoder) 
EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *optionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { + if *((*unsafe.Pointer)(ptr)) == nil { + return true + } + return false +} + +type placeholderEncoder struct { + cfg *frozenConfig + cacheKey reflect.Type +} + +func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.getRealEncoder().Encode(ptr, stream) +} + +func (encoder *placeholderEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.getRealEncoder().IsEmpty(ptr) +} + +func (encoder *placeholderEncoder) getRealEncoder() ValEncoder { + for i := 0; i < 30; i++ { + realDecoder := encoder.cfg.getEncoderFromCache(encoder.cacheKey) + _, isPlaceholder := realDecoder.(*placeholderEncoder) + if isPlaceholder { + time.Sleep(time.Second) + } else { + return realDecoder + } + } + panic(fmt.Sprintf("real encoder not found for cache key: %v", encoder.cacheKey)) +} + +type placeholderDecoder struct { + cfg *frozenConfig + cacheKey reflect.Type +} + +func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + for i := 0; i < 30; i++ { + realDecoder := decoder.cfg.getDecoderFromCache(decoder.cacheKey) + _, isPlaceholder := realDecoder.(*placeholderDecoder) + if isPlaceholder { + time.Sleep(time.Second) + } else { + realDecoder.Decode(ptr, iter) + return + } + } + panic(fmt.Sprintf("real decoder not found for cache key: %v", decoder.cacheKey)) +} + +// emptyInterface is the header for an interface{} value. +type emptyInterface struct { + typ unsafe.Pointer + word unsafe.Pointer +} + +// emptyInterface is the header for an interface with method (not interface{}) +type nonEmptyInterface struct { + // see ../runtime/iface.go:/Itab + itab *struct { + ityp unsafe.Pointer // static interface type + typ unsafe.Pointer // dynamic concrete type + link unsafe.Pointer + bad int32 + unused int32 + fun [100000]unsafe.Pointer // method table + } + word unsafe.Pointer +} + +// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal +func (iter *Iterator) ReadVal(obj interface{}) { + typ := reflect.TypeOf(obj) + cacheKey := typ.Elem() + decoder, err := decoderOfType(iter.cfg, cacheKey) + if err != nil { + iter.Error = err + return + } + e := (*emptyInterface)(unsafe.Pointer(&obj)) + decoder.Decode(e.word, iter) +} + +// WriteVal copy the go interface into underlying JSON, same as json.Marshal +func (stream *Stream) WriteVal(val interface{}) { + if nil == val { + stream.WriteNil() + return + } + typ := reflect.TypeOf(val) + cacheKey := typ + encoder, err := encoderOfType(stream.cfg, cacheKey) + if err != nil { + stream.Error = err + return + } + encoder.EncodeInterface(val, stream) +} + +type prefix string + +func (p prefix) addToDecoder(decoder ValDecoder, err error) (ValDecoder, error) { + if err != nil { + return nil, fmt.Errorf("%s: %s", p, err.Error()) + } + return decoder, err +} + +func (p prefix) addToEncoder(encoder ValEncoder, err error) (ValEncoder, error) { + if err != nil { + return nil, fmt.Errorf("%s: %s", p, err.Error()) + } + return encoder, err +} + +func decoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + cacheKey := typ + decoder := cfg.getDecoderFromCache(cacheKey) + if decoder != nil { + return decoder, nil + } + decoder = getTypeDecoderFromExtension(typ) + if decoder != nil { + 
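+		// a decoder registered through an Extension or RegisterTypeDecoder takes
+		// precedence over the reflection-based one; cache it and return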
cfg.addDecoderToCache(cacheKey, decoder) + return decoder, nil + } + decoder = &placeholderDecoder{cfg: cfg, cacheKey: cacheKey} + cfg.addDecoderToCache(cacheKey, decoder) + decoder, err := createDecoderOfType(cfg, typ) + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + cfg.addDecoderToCache(cacheKey, decoder) + return decoder, err +} + +func createDecoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + typeName := typ.String() + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{}, nil + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{}, nil + } + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{}, nil + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{}, nil + } + if typ.Implements(unmarshalerType) { + templateInterface := reflect.New(typ).Elem().Interface() + var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)} + if typ.Kind() == reflect.Ptr { + decoder = &optionalDecoder{typ.Elem(), decoder} + } + return decoder, nil + } + if reflect.PtrTo(typ).Implements(unmarshalerType) { + templateInterface := reflect.New(typ).Interface() + var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)} + return decoder, nil + } + if typ.Implements(textUnmarshalerType) { + templateInterface := reflect.New(typ).Elem().Interface() + var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)} + if typ.Kind() == reflect.Ptr { + decoder = &optionalDecoder{typ.Elem(), decoder} + } + return decoder, nil + } + if reflect.PtrTo(typ).Implements(textUnmarshalerType) { + templateInterface := reflect.New(typ).Interface() + var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)} + return decoder, nil + } + if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { + sliceDecoder, err := prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ)) + if err != nil { + return nil, err + } + return &base64Codec{sliceDecoder: sliceDecoder}, nil + } + if typ.Implements(anyType) { + return &anyCodec{}, nil + } + switch typ.Kind() { + case reflect.String: + if typeName != "string" { + return decoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem()) + } + return &stringCodec{}, nil + case reflect.Int: + if typeName != "int" { + return decoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem()) + } + return &intCodec{}, nil + case reflect.Int8: + if typeName != "int8" { + return decoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem()) + } + return &int8Codec{}, nil + case reflect.Int16: + if typeName != "int16" { + return decoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem()) + } + return &int16Codec{}, nil + case reflect.Int32: + if typeName != "int32" { + return decoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem()) + } + return &int32Codec{}, nil + case reflect.Int64: + if typeName != "int64" { + return decoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem()) + } + return &int64Codec{}, nil + case reflect.Uint: + if typeName != "uint" { + return decoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem()) + } + return &uintCodec{}, nil + case reflect.Uint8: + if typeName != "uint8" { + return decoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem()) + } + return &uint8Codec{}, nil + case reflect.Uint16: + if typeName != "uint16" { + return decoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem()) + } + return &uint16Codec{}, nil + case reflect.Uint32: + if typeName != "uint32" 
{ + return decoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem()) + } + return &uint32Codec{}, nil + case reflect.Uintptr: + if typeName != "uintptr" { + return decoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem()) + } + return &uintptrCodec{}, nil + case reflect.Uint64: + if typeName != "uint64" { + return decoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem()) + } + return &uint64Codec{}, nil + case reflect.Float32: + if typeName != "float32" { + return decoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem()) + } + return &float32Codec{}, nil + case reflect.Float64: + if typeName != "float64" { + return decoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem()) + } + return &float64Codec{}, nil + case reflect.Bool: + if typeName != "bool" { + return decoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem()) + } + return &boolCodec{}, nil + case reflect.Interface: + if typ.NumMethod() == 0 { + return &emptyInterfaceCodec{}, nil + } + return &nonEmptyInterfaceCodec{}, nil + case reflect.Struct: + return prefix(fmt.Sprintf("[%s]", typeName)).addToDecoder(decoderOfStruct(cfg, typ)) + case reflect.Array: + return prefix("[array]").addToDecoder(decoderOfArray(cfg, typ)) + case reflect.Slice: + return prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ)) + case reflect.Map: + return prefix("[map]").addToDecoder(decoderOfMap(cfg, typ)) + case reflect.Ptr: + return prefix("[optional]").addToDecoder(decoderOfOptional(cfg, typ)) + default: + return nil, fmt.Errorf("unsupported type: %v", typ) + } +} + +func encoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + cacheKey := typ + encoder := cfg.getEncoderFromCache(cacheKey) + if encoder != nil { + return encoder, nil + } + encoder = getTypeEncoderFromExtension(typ) + if encoder != nil { + cfg.addEncoderToCache(cacheKey, encoder) + return encoder, nil + } + encoder = &placeholderEncoder{cfg: cfg, cacheKey: cacheKey} + cfg.addEncoderToCache(cacheKey, encoder) + encoder, err := createEncoderOfType(cfg, typ) + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + cfg.addEncoderToCache(cacheKey, encoder) + return encoder, err +} + +func createEncoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{}, nil + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{}, nil + } + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{}, nil + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{}, nil + } + if typ.Implements(marshalerType) { + checkIsEmpty, err := createCheckIsEmpty(typ) + if err != nil { + return nil, err + } + templateInterface := reflect.New(typ).Elem().Interface() + var encoder ValEncoder = &marshalerEncoder{ + templateInterface: extractInterface(templateInterface), + checkIsEmpty: checkIsEmpty, + } + if typ.Kind() == reflect.Ptr { + encoder = &optionalEncoder{encoder} + } + return encoder, nil + } + if typ.Implements(textMarshalerType) { + checkIsEmpty, err := createCheckIsEmpty(typ) + if err != nil { + return nil, err + } + templateInterface := reflect.New(typ).Elem().Interface() + var encoder ValEncoder = &textMarshalerEncoder{ + templateInterface: extractInterface(templateInterface), + checkIsEmpty: checkIsEmpty, + } + if typ.Kind() == reflect.Ptr { + encoder = &optionalEncoder{encoder} + } + return encoder, nil + } + if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { + return &base64Codec{}, nil + } + if 
typ.Implements(anyType) { + return &anyCodec{}, nil + } + return createEncoderOfSimpleType(cfg, typ) +} + +func createCheckIsEmpty(typ reflect.Type) (checkIsEmpty, error) { + kind := typ.Kind() + switch kind { + case reflect.String: + return &stringCodec{}, nil + case reflect.Int: + return &intCodec{}, nil + case reflect.Int8: + return &int8Codec{}, nil + case reflect.Int16: + return &int16Codec{}, nil + case reflect.Int32: + return &int32Codec{}, nil + case reflect.Int64: + return &int64Codec{}, nil + case reflect.Uint: + return &uintCodec{}, nil + case reflect.Uint8: + return &uint8Codec{}, nil + case reflect.Uint16: + return &uint16Codec{}, nil + case reflect.Uint32: + return &uint32Codec{}, nil + case reflect.Uintptr: + return &uintptrCodec{}, nil + case reflect.Uint64: + return &uint64Codec{}, nil + case reflect.Float32: + return &float32Codec{}, nil + case reflect.Float64: + return &float64Codec{}, nil + case reflect.Bool: + return &boolCodec{}, nil + case reflect.Interface: + if typ.NumMethod() == 0 { + return &emptyInterfaceCodec{}, nil + } + return &nonEmptyInterfaceCodec{}, nil + case reflect.Struct: + return &structEncoder{}, nil + case reflect.Array: + return &arrayEncoder{}, nil + case reflect.Slice: + return &sliceEncoder{}, nil + case reflect.Map: + return &mapEncoder{}, nil + case reflect.Ptr: + return &optionalEncoder{}, nil + default: + return nil, fmt.Errorf("unsupported type: %v", typ) + } +} + +func createEncoderOfSimpleType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + typeName := typ.String() + kind := typ.Kind() + switch kind { + case reflect.String: + if typeName != "string" { + return encoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem()) + } + return &stringCodec{}, nil + case reflect.Int: + if typeName != "int" { + return encoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem()) + } + return &intCodec{}, nil + case reflect.Int8: + if typeName != "int8" { + return encoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem()) + } + return &int8Codec{}, nil + case reflect.Int16: + if typeName != "int16" { + return encoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem()) + } + return &int16Codec{}, nil + case reflect.Int32: + if typeName != "int32" { + return encoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem()) + } + return &int32Codec{}, nil + case reflect.Int64: + if typeName != "int64" { + return encoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem()) + } + return &int64Codec{}, nil + case reflect.Uint: + if typeName != "uint" { + return encoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem()) + } + return &uintCodec{}, nil + case reflect.Uint8: + if typeName != "uint8" { + return encoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem()) + } + return &uint8Codec{}, nil + case reflect.Uint16: + if typeName != "uint16" { + return encoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem()) + } + return &uint16Codec{}, nil + case reflect.Uint32: + if typeName != "uint32" { + return encoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem()) + } + return &uint32Codec{}, nil + case reflect.Uintptr: + if typeName != "uintptr" { + return encoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem()) + } + return &uintptrCodec{}, nil + case reflect.Uint64: + if typeName != "uint64" { + return encoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem()) + } + return &uint64Codec{}, nil + case reflect.Float32: + if typeName != "float32" { + return encoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem()) + } + return &float32Codec{}, nil + case reflect.Float64: + if 
typeName != "float64" { + return encoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem()) + } + return &float64Codec{}, nil + case reflect.Bool: + if typeName != "bool" { + return encoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem()) + } + return &boolCodec{}, nil + case reflect.Interface: + if typ.NumMethod() == 0 { + return &emptyInterfaceCodec{}, nil + } + return &nonEmptyInterfaceCodec{}, nil + case reflect.Struct: + return prefix(fmt.Sprintf("[%s]", typeName)).addToEncoder(encoderOfStruct(cfg, typ)) + case reflect.Array: + return prefix("[array]").addToEncoder(encoderOfArray(cfg, typ)) + case reflect.Slice: + return prefix("[slice]").addToEncoder(encoderOfSlice(cfg, typ)) + case reflect.Map: + return prefix("[map]").addToEncoder(encoderOfMap(cfg, typ)) + case reflect.Ptr: + return prefix("[optional]").addToEncoder(encoderOfOptional(cfg, typ)) + default: + return nil, fmt.Errorf("unsupported type: %v", typ) + } +} + +func decoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + elemType := typ.Elem() + decoder, err := decoderOfType(cfg, elemType) + if err != nil { + return nil, err + } + return &optionalDecoder{elemType, decoder}, nil +} + +func encoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + elemType := typ.Elem() + elemEncoder, err := encoderOfType(cfg, elemType) + if err != nil { + return nil, err + } + encoder := &optionalEncoder{elemEncoder} + if elemType.Kind() == reflect.Map { + encoder = &optionalEncoder{encoder} + } + return encoder, nil +} + +func decoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + decoder, err := decoderOfType(cfg, typ.Elem()) + if err != nil { + return nil, err + } + mapInterface := reflect.New(typ).Interface() + return &mapDecoder{typ, typ.Key(), typ.Elem(), decoder, extractInterface(mapInterface)}, nil +} + +func extractInterface(val interface{}) emptyInterface { + return *((*emptyInterface)(unsafe.Pointer(&val))) +} + +func encoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + elemType := typ.Elem() + encoder, err := encoderOfType(cfg, elemType) + if err != nil { + return nil, err + } + mapInterface := reflect.New(typ).Elem().Interface() + if cfg.sortMapKeys { + return &sortKeysMapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil + } + return &mapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_array.go b/vendor/github.com/json-iterator/go/feature_reflect_array.go new file mode 100644 index 00000000000..e23f187b7c8 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_array.go @@ -0,0 +1,99 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" + "unsafe" +) + +func decoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + decoder, err := decoderOfType(cfg, typ.Elem()) + if err != nil { + return nil, err + } + return &arrayDecoder{typ, typ.Elem(), decoder}, nil +} + +func encoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + encoder, err := encoderOfType(cfg, typ.Elem()) + if err != nil { + return nil, err + } + if typ.Elem().Kind() == reflect.Map { + encoder = &optionalEncoder{encoder} + } + return &arrayEncoder{typ, typ.Elem(), encoder}, nil +} + +type arrayEncoder struct { + arrayType reflect.Type + elemType reflect.Type + elemEncoder ValEncoder +} + +func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + 
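+	// array elements are laid out contiguously, so each element is reached by
+	// advancing the pointer by elemType.Size()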
stream.WriteArrayStart() + elemPtr := unsafe.Pointer(ptr) + encoder.elemEncoder.Encode(elemPtr, stream) + for i := 1; i < encoder.arrayType.Len(); i++ { + stream.WriteMore() + elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size()) + encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) + } +} + +func (encoder *arrayEncoder) EncodeInterface(val interface{}, stream *Stream) { + // special optimization for interface{} + e := (*emptyInterface)(unsafe.Pointer(&val)) + if e.word == nil { + stream.WriteArrayStart() + stream.WriteNil() + stream.WriteArrayEnd() + return + } + elemType := encoder.arrayType.Elem() + if encoder.arrayType.Len() == 1 && (elemType.Kind() == reflect.Ptr || elemType.Kind() == reflect.Map) { + ptr := uintptr(e.word) + e.word = unsafe.Pointer(&ptr) + } + if reflect.TypeOf(val).Kind() == reflect.Ptr { + encoder.Encode(unsafe.Pointer(&e.word), stream) + } else { + encoder.Encode(e.word, stream) + } +} + +func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type arrayDecoder struct { + arrayType reflect.Type + elemType reflect.Type + elemDecoder ValDecoder +} + +func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) + } +} + +func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + offset := uintptr(0) + iter.ReadArrayCB(func(iter *Iterator) bool { + if offset < decoder.arrayType.Size() { + decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(ptr)+offset), iter) + offset += decoder.elemType.Size() + } else { + iter.Skip() + } + return true + }) +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_extension.go b/vendor/github.com/json-iterator/go/feature_reflect_extension.go new file mode 100644 index 00000000000..3dd38299d49 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_extension.go @@ -0,0 +1,413 @@ +package jsoniter + +import ( + "fmt" + "reflect" + "sort" + "strings" + "unicode" + "unsafe" +) + +var typeDecoders = map[string]ValDecoder{} +var fieldDecoders = map[string]ValDecoder{} +var typeEncoders = map[string]ValEncoder{} +var fieldEncoders = map[string]ValEncoder{} +var extensions = []Extension{} + +// StructDescriptor describe how should we encode/decode the struct +type StructDescriptor struct { + onePtrEmbedded bool + onePtrOptimization bool + Type reflect.Type + Fields []*Binding +} + +// GetField get one field from the descriptor by its name. +// Can not use map here to keep field orders. +func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { + for _, binding := range structDescriptor.Fields { + if binding.Field.Name == fieldName { + return binding + } + } + return nil +} + +// Binding describe how should we encode/decode the struct field +type Binding struct { + levels []int + Field *reflect.StructField + FromNames []string + ToNames []string + Encoder ValEncoder + Decoder ValDecoder +} + +// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. +// Can also rename fields by UpdateStructDescriptor. 
+type Extension interface { + UpdateStructDescriptor(structDescriptor *StructDescriptor) + CreateDecoder(typ reflect.Type) ValDecoder + CreateEncoder(typ reflect.Type) ValEncoder + DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder + DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder +} + +// DummyExtension embed this type get dummy implementation for all methods of Extension +type DummyExtension struct { +} + +// UpdateStructDescriptor No-op +func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateDecoder No-op +func (extension *DummyExtension) CreateDecoder(typ reflect.Type) ValDecoder { + return nil +} + +// CreateEncoder No-op +func (extension *DummyExtension) CreateEncoder(typ reflect.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension *DummyExtension) DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension *DummyExtension) DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type funcDecoder struct { + fun DecoderFunc +} + +func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.fun(ptr, iter) +} + +type funcEncoder struct { + fun EncoderFunc + isEmptyFunc func(ptr unsafe.Pointer) bool +} + +func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.fun(ptr, stream) +} + +func (encoder *funcEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { + if encoder.isEmptyFunc == nil { + return false + } + return encoder.isEmptyFunc(ptr) +} + +// DecoderFunc the function form of TypeDecoder +type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) + +// EncoderFunc the function form of TypeEncoder +type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) + +// RegisterTypeDecoderFunc register TypeDecoder for a type with function +func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { + typeDecoders[typ] = &funcDecoder{fun} +} + +// RegisterTypeDecoder register TypeDecoder for a typ +func RegisterTypeDecoder(typ string, decoder ValDecoder) { + typeDecoders[typ] = decoder +} + +// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function +func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { + RegisterFieldDecoder(typ, field, &funcDecoder{fun}) +} + +// RegisterFieldDecoder register TypeDecoder for a struct field +func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { + fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder +} + +// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function +func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} +} + +// RegisterTypeEncoder register TypeEncoder for a type +func RegisterTypeEncoder(typ string, encoder ValEncoder) { + typeEncoders[typ] = encoder +} + +// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function +func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) +} + +// RegisterFieldEncoder register TypeEncoder for a struct field +func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { + 
fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder +} + +// RegisterExtension register extension +func RegisterExtension(extension Extension) { + extensions = append(extensions, extension) +} + +func getTypeDecoderFromExtension(typ reflect.Type) ValDecoder { + decoder := _getTypeDecoderFromExtension(typ) + if decoder != nil { + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + } + return decoder +} +func _getTypeDecoderFromExtension(typ reflect.Type) ValDecoder { + for _, extension := range extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + typeName := typ.String() + decoder := typeDecoders[typeName] + if decoder != nil { + return decoder + } + if typ.Kind() == reflect.Ptr { + decoder := typeDecoders[typ.Elem().String()] + if decoder != nil { + return &optionalDecoder{typ.Elem(), decoder} + } + } + return nil +} + +func getTypeEncoderFromExtension(typ reflect.Type) ValEncoder { + encoder := _getTypeEncoderFromExtension(typ) + if encoder != nil { + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + } + return encoder +} + +func _getTypeEncoderFromExtension(typ reflect.Type) ValEncoder { + for _, extension := range extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + typeName := typ.String() + encoder := typeEncoders[typeName] + if encoder != nil { + return encoder + } + if typ.Kind() == reflect.Ptr { + encoder := typeEncoders[typ.Elem().String()] + if encoder != nil { + return &optionalEncoder{encoder} + } + } + return nil +} + +func describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, error) { + embeddedBindings := []*Binding{} + bindings := []*Binding{} + for i := 0; i < typ.NumField(); i++ { + field := typ.Field(i) + tag := field.Tag.Get(cfg.getTagKey()) + tagParts := strings.Split(tag, ",") + if tag == "-" { + continue + } + if field.Anonymous && (tag == "" || tagParts[0] == "") { + if field.Type.Kind() == reflect.Struct { + structDescriptor, err := describeStruct(cfg, field.Type) + if err != nil { + return nil, err + } + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) + omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} + binding.Decoder = &structFieldDecoder{&field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } else if field.Type.Kind() == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct { + structDescriptor, err := describeStruct(cfg, field.Type.Elem()) + if err != nil { + return nil, err + } + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) 
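+				// embedded *struct: wrap the promoted bindings so encoding dereferences
+				// the pointer (writing null when nil) and decoding allocates the struct on demand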
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &optionalEncoder{binding.Encoder} + binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} + binding.Decoder = &deferenceDecoder{field.Type.Elem(), binding.Decoder} + binding.Decoder = &structFieldDecoder{&field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } + } + fieldNames := calcFieldNames(field.Name, tagParts[0], tag) + fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name) + decoder := fieldDecoders[fieldCacheKey] + if decoder == nil { + var err error + decoder, err = decoderOfType(cfg, field.Type) + if err != nil { + return nil, err + } + } + encoder := fieldEncoders[fieldCacheKey] + if encoder == nil { + var err error + encoder, err = encoderOfType(cfg, field.Type) + if err != nil { + return nil, err + } + // map is stored as pointer in the struct + if field.Type.Kind() == reflect.Map { + encoder = &optionalEncoder{encoder} + } + } + binding := &Binding{ + Field: &field, + FromNames: fieldNames, + ToNames: fieldNames, + Decoder: decoder, + Encoder: encoder, + } + binding.levels = []int{i} + bindings = append(bindings, binding) + } + return createStructDescriptor(cfg, typ, bindings, embeddedBindings), nil +} +func createStructDescriptor(cfg *frozenConfig, typ reflect.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { + onePtrEmbedded := false + onePtrOptimization := false + if typ.NumField() == 1 { + firstField := typ.Field(0) + switch firstField.Type.Kind() { + case reflect.Ptr: + if firstField.Anonymous && firstField.Type.Elem().Kind() == reflect.Struct { + onePtrEmbedded = true + } + fallthrough + case reflect.Map: + onePtrOptimization = true + case reflect.Struct: + onePtrOptimization = isStructOnePtr(firstField.Type) + } + } + structDescriptor := &StructDescriptor{ + onePtrEmbedded: onePtrEmbedded, + onePtrOptimization: onePtrOptimization, + Type: typ, + Fields: bindings, + } + for _, extension := range extensions { + extension.UpdateStructDescriptor(structDescriptor) + } + processTags(structDescriptor, cfg) + // merge normal & embedded bindings & sort with original order + allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) + sort.Sort(allBindings) + structDescriptor.Fields = allBindings + return structDescriptor +} + +func isStructOnePtr(typ reflect.Type) bool { + if typ.NumField() == 1 { + firstField := typ.Field(0) + switch firstField.Type.Kind() { + case reflect.Ptr: + return true + case reflect.Map: + return true + case reflect.Struct: + return isStructOnePtr(firstField.Type) + } + } + return false +} + +type sortableBindings []*Binding + +func (bindings sortableBindings) Len() int { + return len(bindings) +} + +func (bindings sortableBindings) Less(i, j int) bool { + left := bindings[i].levels + right := bindings[j].levels + k := 0 + for { + if left[k] < right[k] { + return true + } else if left[k] > right[k] { + return false + } + k++ + } +} + +func (bindings sortableBindings) Swap(i, j int) { + bindings[i], bindings[j] = bindings[j], bindings[i] +} + +func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { + for _, binding := range structDescriptor.Fields { + shouldOmitEmpty := false + tagParts := strings.Split(binding.Field.Tag.Get(cfg.getTagKey()), ",") + for _, tagPart := range tagParts[1:] { + if tagPart == "omitempty" { + shouldOmitEmpty = true + } else if tagPart == "string" { + if binding.Field.Type.Kind() == reflect.String { + 
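+					// `,string` on a string field: the value is a JSON string nested inside
+					// a string; the wrappers below add and strip the extra level of quoting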
binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} + binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} + } else { + binding.Decoder = &stringModeNumberDecoder{binding.Decoder} + binding.Encoder = &stringModeNumberEncoder{binding.Encoder} + } + } + } + binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} + binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} + } +} + +func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { + // ignore? + if wholeTag == "-" { + return []string{} + } + // rename? + var fieldNames []string + if tagProvidedFieldName == "" { + fieldNames = []string{originalFieldName} + } else { + fieldNames = []string{tagProvidedFieldName} + } + // private? + isNotExported := unicode.IsLower(rune(originalFieldName[0])) + if isNotExported { + fieldNames = []string{} + } + return fieldNames +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_map.go b/vendor/github.com/json-iterator/go/feature_reflect_map.go new file mode 100644 index 00000000000..005671e01be --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_map.go @@ -0,0 +1,244 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "reflect" + "sort" + "strconv" + "unsafe" +) + +type mapDecoder struct { + mapType reflect.Type + keyType reflect.Type + elemType reflect.Type + elemDecoder ValDecoder + mapInterface emptyInterface +} + +func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + // dark magic to cast unsafe.Pointer back to interface{} using reflect.Type + mapInterface := decoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface).Elem() + if iter.ReadNil() { + realVal.Set(reflect.Zero(decoder.mapType)) + return + } + if realVal.IsNil() { + realVal.Set(reflect.MakeMap(realVal.Type())) + } + iter.ReadMapCB(func(iter *Iterator, keyStr string) bool { + elem := reflect.New(decoder.elemType) + decoder.elemDecoder.Decode(unsafe.Pointer(elem.Pointer()), iter) + // to put into map, we have to use reflection + keyType := decoder.keyType + // TODO: remove this from loop + switch { + case keyType.Kind() == reflect.String: + realVal.SetMapIndex(reflect.ValueOf(keyStr).Convert(keyType), elem.Elem()) + return true + case keyType.Implements(textUnmarshalerType): + textUnmarshaler := reflect.New(keyType.Elem()).Interface().(encoding.TextUnmarshaler) + err := textUnmarshaler.UnmarshalText([]byte(keyStr)) + if err != nil { + iter.ReportError("read map key as TextUnmarshaler", err.Error()) + return false + } + realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler), elem.Elem()) + return true + case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + textUnmarshaler := reflect.New(keyType).Interface().(encoding.TextUnmarshaler) + err := textUnmarshaler.UnmarshalText([]byte(keyStr)) + if err != nil { + iter.ReportError("read map key as TextUnmarshaler", err.Error()) + return false + } + realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler).Elem(), elem.Elem()) + return true + default: + switch keyType.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(keyStr, 10, 64) + if err != nil || reflect.Zero(keyType).OverflowInt(n) { + iter.ReportError("read map key as int64", "read int64 failed") + return false + } + realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) + return true + 
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(keyStr, 10, 64) + if err != nil || reflect.Zero(keyType).OverflowUint(n) { + iter.ReportError("read map key as uint64", "read uint64 failed") + return false + } + realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) + return true + } + } + iter.ReportError("read map key", "unexpected map key type "+keyType.String()) + return true + }) +} + +type mapEncoder struct { + mapType reflect.Type + elemType reflect.Type + elemEncoder ValEncoder + mapInterface emptyInterface +} + +func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + stream.WriteObjectStart() + for i, key := range realVal.MapKeys() { + if i != 0 { + stream.WriteMore() + } + encodeMapKey(key, stream) + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + val := realVal.MapIndex(key).Interface() + encoder.elemEncoder.EncodeInterface(val, stream) + } + stream.WriteObjectEnd() +} + +func encodeMapKey(key reflect.Value, stream *Stream) { + if key.Kind() == reflect.String { + stream.WriteString(key.String()) + return + } + if tm, ok := key.Interface().(encoding.TextMarshaler); ok { + buf, err := tm.MarshalText() + if err != nil { + stream.Error = err + return + } + stream.writeByte('"') + stream.Write(buf) + stream.writeByte('"') + return + } + switch key.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + stream.writeByte('"') + stream.WriteInt64(key.Int()) + stream.writeByte('"') + return + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + stream.writeByte('"') + stream.WriteUint64(key.Uint()) + stream.writeByte('"') + return + } + stream.Error = &json.UnsupportedTypeError{Type: key.Type()} +} + +func (encoder *mapEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + return realVal.Len() == 0 +} + +type sortKeysMapEncoder struct { + mapType reflect.Type + elemType reflect.Type + elemEncoder ValEncoder + mapInterface emptyInterface +} + +func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + + // Extract and sort the keys. 
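+	// keys are converted to their string form (ints/uints formatted, TextMarshaler
+	// keys marshaled) and sorted so the emitted key order is deterministic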
+ keys := realVal.MapKeys() + sv := stringValues(make([]reflectWithString, len(keys))) + for i, v := range keys { + sv[i].v = v + if err := sv[i].resolve(); err != nil { + stream.Error = err + return + } + } + sort.Sort(sv) + + stream.WriteObjectStart() + for i, key := range sv { + if i != 0 { + stream.WriteMore() + } + stream.WriteVal(key.s) // might need html escape, so can not WriteString directly + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + val := realVal.MapIndex(key.v).Interface() + encoder.elemEncoder.EncodeInterface(val, stream) + } + stream.WriteObjectEnd() +} + +// stringValues is a slice of reflect.Value holding *reflect.StringValue. +// It implements the methods to sort by string. +type stringValues []reflectWithString + +type reflectWithString struct { + v reflect.Value + s string +} + +func (w *reflectWithString) resolve() error { + if w.v.Kind() == reflect.String { + w.s = w.v.String() + return nil + } + if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok { + buf, err := tm.MarshalText() + w.s = string(buf) + return err + } + switch w.v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + w.s = strconv.FormatInt(w.v.Int(), 10) + return nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + w.s = strconv.FormatUint(w.v.Uint(), 10) + return nil + } + return &json.UnsupportedTypeError{Type: w.v.Type()} +} + +func (sv stringValues) Len() int { return len(sv) } +func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv stringValues) Less(i, j int) bool { return sv[i].s < sv[j].s } + +func (encoder *sortKeysMapEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + return realVal.Len() == 0 +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_native.go b/vendor/github.com/json-iterator/go/feature_reflect_native.go new file mode 100644 index 00000000000..b37dab3d8a1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_native.go @@ -0,0 +1,672 @@ +package jsoniter + +import ( + "encoding" + "encoding/base64" + "encoding/json" + "unsafe" +) + +type stringCodec struct { +} + +func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*string)(ptr)) = iter.ReadString() +} + +func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteString(str) +} + +func (codec *stringCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +type intCodec struct { +} + +func (codec *intCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*int)(ptr)) = iter.ReadInt() +} + +func (codec *intCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt(*((*int)(ptr))) +} + +func (codec *intCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *intCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int)(ptr)) == 0 +} + +type uintptrCodec struct { +} + +func (codec *uintptrCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { 
+ *((*uintptr)(ptr)) = uintptr(iter.ReadUint64()) +} + +func (codec *uintptrCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(uint64(*((*uintptr)(ptr)))) +} + +func (codec *uintptrCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uintptrCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uintptr)(ptr)) == 0 +} + +type int8Codec struct { +} + +func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*int8)(ptr)) = iter.ReadInt8() +} + +func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt8(*((*int8)(ptr))) +} + +func (codec *int8Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int8)(ptr)) == 0 +} + +type int16Codec struct { +} + +func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*int16)(ptr)) = iter.ReadInt16() +} + +func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt16(*((*int16)(ptr))) +} + +func (codec *int16Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int16)(ptr)) == 0 +} + +type int32Codec struct { +} + +func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*int32)(ptr)) = iter.ReadInt32() +} + +func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt32(*((*int32)(ptr))) +} + +func (codec *int32Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int32)(ptr)) == 0 +} + +type int64Codec struct { +} + +func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*int64)(ptr)) = iter.ReadInt64() +} + +func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt64(*((*int64)(ptr))) +} + +func (codec *int64Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int64)(ptr)) == 0 +} + +type uintCodec struct { +} + +func (codec *uintCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*uint)(ptr)) = iter.ReadUint() +} + +func (codec *uintCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint(*((*uint)(ptr))) +} + +func (codec *uintCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uintCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint)(ptr)) == 0 +} + +type uint8Codec struct { +} + +func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*uint8)(ptr)) = iter.ReadUint8() +} + +func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint8(*((*uint8)(ptr))) +} + +func (codec *uint8Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint8)(ptr)) == 0 +} + +type uint16Codec struct { +} + +func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*uint16)(ptr)) = iter.ReadUint16() +} + +func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint16(*((*uint16)(ptr))) +} + +func (codec *uint16Codec) EncodeInterface(val interface{}, 
stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint16)(ptr)) == 0 +} + +type uint32Codec struct { +} + +func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*uint32)(ptr)) = iter.ReadUint32() +} + +func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint32(*((*uint32)(ptr))) +} + +func (codec *uint32Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint32)(ptr)) == 0 +} + +type uint64Codec struct { +} + +func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*uint64)(ptr)) = iter.ReadUint64() +} + +func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(*((*uint64)(ptr))) +} + +func (codec *uint64Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint64)(ptr)) == 0 +} + +type float32Codec struct { +} + +func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*float32)(ptr)) = iter.ReadFloat32() +} + +func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32(*((*float32)(ptr))) +} + +func (codec *float32Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type float64Codec struct { +} + +func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*float64)(ptr)) = iter.ReadFloat64() +} + +func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64(*((*float64)(ptr))) +} + +func (codec *float64Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +type boolCodec struct { +} + +func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*bool)(ptr)) = iter.ReadBool() +} + +func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteBool(*((*bool)(ptr))) +} + +func (codec *boolCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { + return !(*((*bool)(ptr))) +} + +type emptyInterfaceCodec struct { +} + +func (codec *emptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*interface{})(ptr)) = iter.Read() +} + +func (codec *emptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteVal(*((*interface{})(ptr))) +} + +func (codec *emptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteVal(val) +} + +func (codec *emptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { + return ptr == nil +} + +type nonEmptyInterfaceCodec struct { +} + +func (codec *nonEmptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + nonEmptyInterface := (*nonEmptyInterface)(ptr) + if nonEmptyInterface.itab == nil { + iter.ReportError("read non-empty interface", "do not know which concrete type to decode to") + return + } + var i interface{} + e := (*emptyInterface)(unsafe.Pointer(&i)) + e.typ = nonEmptyInterface.itab.typ + e.word = nonEmptyInterface.word + iter.ReadVal(&i) + 
nonEmptyInterface.word = e.word +} + +func (codec *nonEmptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + nonEmptyInterface := (*nonEmptyInterface)(ptr) + var i interface{} + e := (*emptyInterface)(unsafe.Pointer(&i)) + e.typ = nonEmptyInterface.itab.typ + e.word = nonEmptyInterface.word + stream.WriteVal(i) +} + +func (codec *nonEmptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteVal(val) +} + +func (codec *nonEmptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { + nonEmptyInterface := (*nonEmptyInterface)(ptr) + return nonEmptyInterface.word == nil +} + +type anyCodec struct { +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*Any)(ptr)) = iter.ReadAny() +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + (*((*Any)(ptr))).WriteTo(stream) +} + +func (codec *anyCodec) EncodeInterface(val interface{}, stream *Stream) { + (val.(Any)).WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + return (*((*Any)(ptr))).Size() == 0 +} + +type jsonNumberCodec struct { +} + +func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) +} + +func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*json.Number)(ptr)))) +} + +func (codec *jsonNumberCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteRaw(string(val.(json.Number))) +} + +func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.Number)(ptr))) == 0 +} + +type jsoniterNumberCodec struct { +} + +func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) +} + +func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*Number)(ptr)))) +} + +func (codec *jsoniterNumberCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteRaw(string(val.(Number))) +} + +func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*Number)(ptr))) == 0 +} + +type jsonRawMessageCodec struct { +} + +func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) +} + +func (codec *jsonRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteRaw(string(val.(json.RawMessage))) +} + +func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 +} + +type jsoniterRawMessageCodec struct { +} + +func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) +} + +func (codec *jsoniterRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteRaw(string(val.(RawMessage))) +} + +func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*RawMessage)(ptr))) == 0 +} + +type base64Codec struct { + sliceDecoder ValDecoder +} + +func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + 
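+		// A JSON null resets the destination []byte to an empty nil slice
+		// rather than reporting an error.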
ptrSlice := (*sliceHeader)(ptr) + ptrSlice.Len = 0 + ptrSlice.Cap = 0 + ptrSlice.Data = nil + return + } + switch iter.WhatIsNext() { + case StringValue: + encoding := base64.StdEncoding + src := iter.SkipAndReturnBytes() + src = src[1 : len(src)-1] + decodedLen := encoding.DecodedLen(len(src)) + dst := make([]byte, decodedLen) + len, err := encoding.Decode(dst, src) + if err != nil { + iter.ReportError("decode base64", err.Error()) + } else { + dst = dst[:len] + dstSlice := (*sliceHeader)(unsafe.Pointer(&dst)) + ptrSlice := (*sliceHeader)(ptr) + ptrSlice.Data = dstSlice.Data + ptrSlice.Cap = dstSlice.Cap + ptrSlice.Len = dstSlice.Len + } + case ArrayValue: + codec.sliceDecoder.Decode(ptr, iter) + default: + iter.ReportError("base64Codec", "invalid input") + } +} + +func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + src := *((*[]byte)(ptr)) + if len(src) == 0 { + stream.WriteNil() + return + } + encoding := base64.StdEncoding + stream.writeByte('"') + toGrow := encoding.EncodedLen(len(src)) + stream.ensure(toGrow) + encoding.Encode(stream.buf[stream.n:], src) + stream.n += toGrow + stream.writeByte('"') +} + +func (codec *base64Codec) EncodeInterface(val interface{}, stream *Stream) { + ptr := extractInterface(val).word + src := *((*[]byte)(ptr)) + if len(src) == 0 { + stream.WriteNil() + return + } + encoding := base64.StdEncoding + stream.writeByte('"') + toGrow := encoding.EncodedLen(len(src)) + stream.ensure(toGrow) + encoding.Encode(stream.buf[stream.n:], src) + stream.n += toGrow + stream.writeByte('"') +} + +func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*[]byte)(ptr))) == 0 +} + +type stringModeNumberDecoder struct { + elemDecoder ValDecoder +} + +func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect "`) + return + } + decoder.elemDecoder.Decode(ptr, iter) + if iter.Error != nil { + return + } + c = iter.readByte() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect "`) + return + } +} + +type stringModeStringDecoder struct { + elemDecoder ValDecoder + cfg *frozenConfig +} + +func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.elemDecoder.Decode(ptr, iter) + str := *((*string)(ptr)) + tempIter := decoder.cfg.BorrowIterator([]byte(str)) + defer decoder.cfg.ReturnIterator(tempIter) + *((*string)(ptr)) = tempIter.ReadString() +} + +type stringModeNumberEncoder struct { + elemEncoder ValEncoder +} + +func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.elemEncoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *stringModeNumberEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type stringModeStringEncoder struct { + elemEncoder ValEncoder + cfg *frozenConfig +} + +func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + tempStream := encoder.cfg.BorrowStream(nil) + defer encoder.cfg.ReturnStream(tempStream) + encoder.elemEncoder.Encode(ptr, tempStream) + stream.WriteString(string(tempStream.Buffer())) +} + +func (encoder *stringModeStringEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder 
*stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type marshalerEncoder struct { + templateInterface emptyInterface + checkIsEmpty checkIsEmpty +} + +func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + templateInterface := encoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + marshaler := (*realInterface).(json.Marshaler) + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} +func (encoder *marshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type textMarshalerEncoder struct { + templateInterface emptyInterface + checkIsEmpty checkIsEmpty +} + +func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + templateInterface := encoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + marshaler := (*realInterface).(encoding.TextMarshaler) + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + stream.WriteString(string(bytes)) + } +} + +func (encoder *textMarshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type unmarshalerDecoder struct { + templateInterface emptyInterface +} + +func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + templateInterface := decoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + unmarshaler := (*realInterface).(json.Unmarshaler) + iter.nextToken() + iter.unreadByte() // skip spaces + bytes := iter.SkipAndReturnBytes() + err := unmarshaler.UnmarshalJSON(bytes) + if err != nil { + iter.ReportError("unmarshalerDecoder", err.Error()) + } +} + +type textUnmarshalerDecoder struct { + templateInterface emptyInterface +} + +func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + templateInterface := decoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + unmarshaler := (*realInterface).(encoding.TextUnmarshaler) + str := iter.ReadString() + err := unmarshaler.UnmarshalText([]byte(str)) + if err != nil { + iter.ReportError("textUnmarshalerDecoder", err.Error()) + } +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_object.go b/vendor/github.com/json-iterator/go/feature_reflect_object.go new file mode 100644 index 00000000000..59b1235c0d4 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_object.go @@ -0,0 +1,196 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" + "strings" + "unsafe" +) + +func encoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + type bindingTo struct { + binding *Binding + toName string + ignored bool + } + orderedBindings := []*bindingTo{} + structDescriptor, err := describeStruct(cfg, typ) + if err != nil { + return nil, err + } + for _, binding := range structDescriptor.Fields { + for _, toName := range binding.ToNames { + new := &bindingTo{ + binding: binding, + toName: toName, + 
} + for _, old := range orderedBindings { + if old.toName != toName { + continue + } + old.ignored, new.ignored = resolveConflictBinding(cfg, old.binding, new.binding) + } + orderedBindings = append(orderedBindings, new) + } + } + if len(orderedBindings) == 0 { + return &emptyStructEncoder{}, nil + } + finalOrderedFields := []structFieldTo{} + for _, bindingTo := range orderedBindings { + if !bindingTo.ignored { + finalOrderedFields = append(finalOrderedFields, structFieldTo{ + encoder: bindingTo.binding.Encoder.(*structFieldEncoder), + toName: bindingTo.toName, + }) + } + } + return &structEncoder{structDescriptor.onePtrEmbedded, structDescriptor.onePtrOptimization, finalOrderedFields}, nil +} + +func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { + newTagged := new.Field.Tag.Get(cfg.getTagKey()) != "" + oldTagged := old.Field.Tag.Get(cfg.getTagKey()) != "" + if newTagged { + if oldTagged { + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } else { + return true, false + } + } else { + if oldTagged { + return true, false + } + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } +} + +func decoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + bindings := map[string]*Binding{} + structDescriptor, err := describeStruct(cfg, typ) + if err != nil { + return nil, err + } + for _, binding := range structDescriptor.Fields { + for _, fromName := range binding.FromNames { + old := bindings[fromName] + if old == nil { + bindings[fromName] = binding + continue + } + ignoreOld, ignoreNew := resolveConflictBinding(cfg, old, binding) + if ignoreOld { + delete(bindings, fromName) + } + if !ignoreNew { + bindings[fromName] = binding + } + } + } + fields := map[string]*structFieldDecoder{} + for k, binding := range bindings { + fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) + } + return createStructDecoder(typ, fields) +} + +type structFieldEncoder struct { + field *reflect.StructField + fieldEncoder ValEncoder + omitempty bool +} + +func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) + encoder.fieldEncoder.Encode(fieldPtr, stream) + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%s: %s", encoder.field.Name, stream.Error.Error()) + } +} + +func (encoder *structFieldEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { + fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) + return encoder.fieldEncoder.IsEmpty(fieldPtr) +} + +type structEncoder struct { + onePtrEmbedded bool + onePtrOptimization bool + fields []structFieldTo +} + +type structFieldTo struct { + encoder *structFieldEncoder + toName string +} + +func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + isNotFirst := false + for _, field := range encoder.fields { + if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { + continue + } + if isNotFirst { + stream.WriteMore() + } + stream.WriteObjectField(field.toName) + field.encoder.Encode(ptr, stream) + isNotFirst = true + } + stream.WriteObjectEnd() +} + +func (encoder 
*structEncoder) EncodeInterface(val interface{}, stream *Stream) { + e := (*emptyInterface)(unsafe.Pointer(&val)) + if encoder.onePtrOptimization { + if e.word == nil && encoder.onePtrEmbedded { + stream.WriteObjectStart() + stream.WriteObjectEnd() + return + } + ptr := uintptr(e.word) + e.word = unsafe.Pointer(&ptr) + } + if reflect.TypeOf(val).Kind() == reflect.Ptr { + encoder.Encode(unsafe.Pointer(&e.word), stream) + } else { + encoder.Encode(e.word, stream) + } +} + +func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type emptyStructEncoder struct { +} + +func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyObject() +} + +func (encoder *emptyStructEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_slice.go b/vendor/github.com/json-iterator/go/feature_reflect_slice.go new file mode 100644 index 00000000000..7377eec7b3b --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_slice.go @@ -0,0 +1,149 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" + "unsafe" +) + +func decoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { + decoder, err := decoderOfType(cfg, typ.Elem()) + if err != nil { + return nil, err + } + return &sliceDecoder{typ, typ.Elem(), decoder}, nil +} + +func encoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { + encoder, err := encoderOfType(cfg, typ.Elem()) + if err != nil { + return nil, err + } + if typ.Elem().Kind() == reflect.Map { + encoder = &optionalEncoder{encoder} + } + return &sliceEncoder{typ, typ.Elem(), encoder}, nil +} + +type sliceEncoder struct { + sliceType reflect.Type + elemType reflect.Type + elemEncoder ValEncoder +} + +func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + slice := (*sliceHeader)(ptr) + if slice.Data == nil { + stream.WriteNil() + return + } + if slice.Len == 0 { + stream.WriteEmptyArray() + return + } + stream.WriteArrayStart() + elemPtr := unsafe.Pointer(slice.Data) + encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) + for i := 1; i < slice.Len; i++ { + stream.WriteMore() + elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size()) + encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) + } +} + +func (encoder *sliceEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + slice := (*sliceHeader)(ptr) + return slice.Len == 0 +} + +type sliceDecoder struct { + sliceType reflect.Type + elemType reflect.Type + elemDecoder ValDecoder +} + +// sliceHeader is a safe version of SliceHeader used within this package. 
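+// Unlike reflect.SliceHeader, Data is an unsafe.Pointer rather than a uintptr,
+// so the garbage collector keeps the backing array reachable while this header
+// is in use.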
+type sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int +} + +func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) + } +} + +func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + slice := (*sliceHeader)(ptr) + if iter.ReadNil() { + slice.Len = 0 + slice.Cap = 0 + slice.Data = nil + return + } + reuseSlice(slice, decoder.sliceType, 4) + slice.Len = 0 + offset := uintptr(0) + iter.ReadArrayCB(func(iter *Iterator) bool { + growOne(slice, decoder.sliceType, decoder.elemType) + decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(slice.Data)+offset), iter) + offset += decoder.elemType.Size() + return true + }) +} + +// grow grows the slice s so that it can hold extra more values, allocating +// more capacity if needed. It also returns the old and new slice lengths. +func growOne(slice *sliceHeader, sliceType reflect.Type, elementType reflect.Type) { + newLen := slice.Len + 1 + if newLen <= slice.Cap { + slice.Len = newLen + return + } + newCap := slice.Cap + if newCap == 0 { + newCap = 1 + } else { + for newCap < newLen { + if slice.Len < 1024 { + newCap += newCap + } else { + newCap += newCap / 4 + } + } + } + newVal := reflect.MakeSlice(sliceType, newLen, newCap) + dst := unsafe.Pointer(newVal.Pointer()) + // copy old array into new array + originalBytesCount := uintptr(slice.Len) * elementType.Size() + srcPtr := (*[1 << 30]byte)(slice.Data) + dstPtr := (*[1 << 30]byte)(dst) + for i := uintptr(0); i < originalBytesCount; i++ { + dstPtr[i] = srcPtr[i] + } + slice.Data = dst + slice.Len = newLen + slice.Cap = newCap +} + +func reuseSlice(slice *sliceHeader, sliceType reflect.Type, expectedCap int) { + if expectedCap <= slice.Cap { + return + } + newVal := reflect.MakeSlice(sliceType, 0, expectedCap) + dst := unsafe.Pointer(newVal.Pointer()) + slice.Data = dst + slice.Cap = expectedCap +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go new file mode 100644 index 00000000000..b3417fd73a7 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go @@ -0,0 +1,916 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" + "strings" + "unsafe" +) + +func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder) (ValDecoder, error) { + knownHash := map[int32]struct{}{ + 0: {}, + } + switch len(fields) { + case 0: + return &skipObjectDecoder{typ}, nil + case 1: + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}, nil + } + case 2: + var fieldHash1 int32 + var fieldHash2 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldHash1 == 0 { + fieldHash1 = fieldHash + fieldDecoder1 = fieldDecoder + } else { + fieldHash2 = fieldHash + fieldDecoder2 = fieldDecoder + } + } + return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}, nil + 
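+	// Cases 3 through 10 repeat the same pattern: each known (lower-cased)
+	// field name is reduced to a 32-bit hash via calcHash and paired with its
+	// decoder, so Decode can switch on iter.readFieldHash() instead of doing a
+	// map lookup. Hash value 0 is reserved, and any hash collision falls back
+	// to the map-based generalStructDecoder.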
case 3: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } + } + return &threeFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3}, nil + case 4: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } + } + return &fourFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4}, nil + case 5: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } + } + return &fiveFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, fieldName5, fieldDecoder5}, nil + case 6: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + 
fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } + } + return &sixFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6}, nil + case 7: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldName7 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } + } + return &sevenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7}, nil + case 8: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldName7 int32 + var fieldName8 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } + 
} + return &eightFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, fieldName8, fieldDecoder8}, nil + case 9: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldName7 int32 + var fieldName8 int32 + var fieldName9 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } + } + return &nineFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9}, nil + case 10: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldName7 int32 + var fieldName8 int32 + var fieldName9 int32 + var fieldName10 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + var fieldDecoder10 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields}, nil + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + 
fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else if fieldName9 == 0 { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } else { + fieldName10 = fieldHash + fieldDecoder10 = fieldDecoder + } + } + return &tenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9, + fieldName10, fieldDecoder10}, nil + } + return &generalStructDecoder{typ, fields}, nil +} + +type generalStructDecoder struct { + typ reflect.Type + fields map[string]*structFieldDecoder +} + +func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + fieldBytes := iter.readObjectFieldAsBytes() + field := *(*string)(unsafe.Pointer(&fieldBytes)) + fieldDecoder := decoder.fields[strings.ToLower(field)] + if fieldDecoder == nil { + iter.Skip() + } else { + fieldDecoder.Decode(ptr, iter) + } + for iter.nextToken() == ',' { + fieldBytes = iter.readObjectFieldAsBytes() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + fieldDecoder = decoder.fields[strings.ToLower(field)] + if fieldDecoder == nil { + iter.Skip() + } else { + fieldDecoder.Decode(ptr, iter) + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type skipObjectDecoder struct { + typ reflect.Type +} + +func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valueType := iter.WhatIsNext() + if valueType != ObjectValue && valueType != NilValue { + iter.ReportError("skipObjectDecoder", "expect object or null") + return + } + iter.Skip() +} + +type oneFieldStructDecoder struct { + typ reflect.Type + fieldHash int32 + fieldDecoder *structFieldDecoder +} + +func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + if iter.readFieldHash() == decoder.fieldHash { + decoder.fieldDecoder.Decode(ptr, iter) + } else { + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type twoFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder +} + +func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type threeFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder +} + +func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case 
decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type fourFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder +} + +func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type fiveFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder +} + +func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type sixFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder + fieldHash6 int32 + fieldDecoder6 *structFieldDecoder +} + +func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type sevenFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 
int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder + fieldHash6 int32 + fieldDecoder6 *structFieldDecoder + fieldHash7 int32 + fieldDecoder7 *structFieldDecoder +} + +func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type eightFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder + fieldHash6 int32 + fieldDecoder6 *structFieldDecoder + fieldHash7 int32 + fieldDecoder7 *structFieldDecoder + fieldHash8 int32 + fieldDecoder8 *structFieldDecoder +} + +func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type nineFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder + fieldHash6 int32 + fieldDecoder6 *structFieldDecoder + fieldHash7 int32 + fieldDecoder7 *structFieldDecoder + fieldHash8 int32 + fieldDecoder8 *structFieldDecoder + fieldHash9 int32 + fieldDecoder9 *structFieldDecoder +} + +func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + 
decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type tenFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder + fieldHash6 int32 + fieldDecoder6 *structFieldDecoder + fieldHash7 int32 + fieldDecoder7 *structFieldDecoder + fieldHash8 int32 + fieldDecoder8 *structFieldDecoder + fieldHash9 int32 + fieldDecoder9 *structFieldDecoder + fieldHash10 int32 + fieldDecoder10 *structFieldDecoder +} + +func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + case decoder.fieldHash10: + decoder.fieldDecoder10.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + } +} + +type structFieldDecoder struct { + field *reflect.StructField + fieldDecoder ValDecoder +} + +func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + fieldPtr := unsafe.Pointer(uintptr(ptr) + decoder.field.Offset) + decoder.fieldDecoder.Decode(fieldPtr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%s: %s", decoder.field.Name, iter.Error.Error()) + } +} diff --git a/vendor/github.com/json-iterator/go/feature_stream.go b/vendor/github.com/json-iterator/go/feature_stream.go new file mode 100644 index 00000000000..9c8470a03ae --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_stream.go @@ -0,0 +1,305 @@ +package jsoniter + +import ( + "io" +) + +// Stream is a io.Writer like object, with JSON specific write functions. +// Error is not returned as return value, but stored as Error member on this stream instance. +type Stream struct { + cfg *frozenConfig + out io.Writer + buf []byte + n int + Error error + indention int +} + +// NewStream create new stream instance. +// cfg can be jsoniter.ConfigDefault. +// out can be nil if write to internal buffer. +// bufSize is the initial size for the internal buffer in bytes. 
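+//
+// A minimal usage sketch (package-level names as documented above):
+//
+//	stream := NewStream(ConfigDefault, os.Stdout, 512)
+//	stream.WriteVal([]int{1, 2, 3})
+//	stream.Flush() // writes [1,2,3]; any failure is left in stream.Error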
+func NewStream(cfg API, out io.Writer, bufSize int) *Stream { + return &Stream{ + cfg: cfg.(*frozenConfig), + out: out, + buf: make([]byte, bufSize), + n: 0, + Error: nil, + indention: 0, + } +} + +// Pool returns a pool can provide more stream with same configuration +func (stream *Stream) Pool() StreamPool { + return stream.cfg +} + +// Reset reuse this stream instance by assign a new writer +func (stream *Stream) Reset(out io.Writer) { + stream.out = out + stream.n = 0 +} + +// Available returns how many bytes are unused in the buffer. +func (stream *Stream) Available() int { + return len(stream.buf) - stream.n +} + +// Buffered returns the number of bytes that have been written into the current buffer. +func (stream *Stream) Buffered() int { + return stream.n +} + +// Buffer if writer is nil, use this method to take the result +func (stream *Stream) Buffer() []byte { + return stream.buf[:stream.n] +} + +// Write writes the contents of p into the buffer. +// It returns the number of bytes written. +// If nn < len(p), it also returns an error explaining +// why the write is short. +func (stream *Stream) Write(p []byte) (nn int, err error) { + for len(p) > stream.Available() && stream.Error == nil { + if stream.out == nil { + stream.growAtLeast(len(p)) + } else { + var n int + if stream.Buffered() == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, stream.Error = stream.out.Write(p) + } else { + n = copy(stream.buf[stream.n:], p) + stream.n += n + stream.Flush() + } + nn += n + p = p[n:] + } + } + if stream.Error != nil { + return nn, stream.Error + } + n := copy(stream.buf[stream.n:], p) + stream.n += n + nn += n + return nn, nil +} + +// WriteByte writes a single byte. +func (stream *Stream) writeByte(c byte) { + if stream.Error != nil { + return + } + if stream.Available() < 1 { + stream.growAtLeast(1) + } + stream.buf[stream.n] = c + stream.n++ +} + +func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { + if stream.Error != nil { + return + } + if stream.Available() < 2 { + stream.growAtLeast(2) + } + stream.buf[stream.n] = c1 + stream.buf[stream.n+1] = c2 + stream.n += 2 +} + +func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { + if stream.Error != nil { + return + } + if stream.Available() < 3 { + stream.growAtLeast(3) + } + stream.buf[stream.n] = c1 + stream.buf[stream.n+1] = c2 + stream.buf[stream.n+2] = c3 + stream.n += 3 +} + +func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { + if stream.Error != nil { + return + } + if stream.Available() < 4 { + stream.growAtLeast(4) + } + stream.buf[stream.n] = c1 + stream.buf[stream.n+1] = c2 + stream.buf[stream.n+2] = c3 + stream.buf[stream.n+3] = c4 + stream.n += 4 +} + +func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { + if stream.Error != nil { + return + } + if stream.Available() < 5 { + stream.growAtLeast(5) + } + stream.buf[stream.n] = c1 + stream.buf[stream.n+1] = c2 + stream.buf[stream.n+2] = c3 + stream.buf[stream.n+3] = c4 + stream.buf[stream.n+4] = c5 + stream.n += 5 +} + +// Flush writes any buffered data to the underlying io.Writer. 
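+// On a short or failed write, the unflushed bytes are kept at the front of the
+// buffer and the error is recorded in stream.Error, which causes subsequent
+// writes on this stream to be skipped.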
+func (stream *Stream) Flush() error { + if stream.out == nil { + return nil + } + if stream.Error != nil { + return stream.Error + } + if stream.n == 0 { + return nil + } + n, err := stream.out.Write(stream.buf[0:stream.n]) + if n < stream.n && err == nil { + err = io.ErrShortWrite + } + if err != nil { + if n > 0 && n < stream.n { + copy(stream.buf[0:stream.n-n], stream.buf[n:stream.n]) + } + stream.n -= n + stream.Error = err + return err + } + stream.n = 0 + return nil +} + +func (stream *Stream) ensure(minimal int) { + available := stream.Available() + if available < minimal { + stream.growAtLeast(minimal) + } +} + +func (stream *Stream) growAtLeast(minimal int) { + if stream.out != nil { + stream.Flush() + } + toGrow := len(stream.buf) + if toGrow < minimal { + toGrow = minimal + } + newBuf := make([]byte, len(stream.buf)+toGrow) + copy(newBuf, stream.Buffer()) + stream.buf = newBuf +} + +// WriteRaw write string out without quotes, just like []byte +func (stream *Stream) WriteRaw(s string) { + stream.ensure(len(s)) + if stream.Error != nil { + return + } + n := copy(stream.buf[stream.n:], s) + stream.n += n +} + +// WriteNil write null to stream +func (stream *Stream) WriteNil() { + stream.writeFourBytes('n', 'u', 'l', 'l') +} + +// WriteTrue write true to stream +func (stream *Stream) WriteTrue() { + stream.writeFourBytes('t', 'r', 'u', 'e') +} + +// WriteFalse write false to stream +func (stream *Stream) WriteFalse() { + stream.writeFiveBytes('f', 'a', 'l', 's', 'e') +} + +// WriteBool write true or false into stream +func (stream *Stream) WriteBool(val bool) { + if val { + stream.WriteTrue() + } else { + stream.WriteFalse() + } +} + +// WriteObjectStart write { with possible indention +func (stream *Stream) WriteObjectStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('{') + stream.writeIndention(0) +} + +// WriteObjectField write "field": with possible indention +func (stream *Stream) WriteObjectField(field string) { + stream.WriteString(field) + if stream.indention > 0 { + stream.writeTwoBytes(':', ' ') + } else { + stream.writeByte(':') + } +} + +// WriteObjectEnd write } with possible indention +func (stream *Stream) WriteObjectEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte('}') +} + +// WriteEmptyObject write {} +func (stream *Stream) WriteEmptyObject() { + stream.writeByte('{') + stream.writeByte('}') +} + +// WriteMore write , with possible indention +func (stream *Stream) WriteMore() { + stream.writeByte(',') + stream.writeIndention(0) +} + +// WriteArrayStart write [ with possible indention +func (stream *Stream) WriteArrayStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('[') + stream.writeIndention(0) +} + +// WriteEmptyArray write [] +func (stream *Stream) WriteEmptyArray() { + stream.writeByte('[') + stream.writeByte(']') +} + +// WriteArrayEnd write ] with possible indention +func (stream *Stream) WriteArrayEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte(']') +} + +func (stream *Stream) writeIndention(delta int) { + if stream.indention == 0 { + return + } + stream.writeByte('\n') + toWrite := stream.indention - delta + stream.ensure(toWrite) + for i := 0; i < toWrite && stream.n < len(stream.buf); i++ { + stream.buf[stream.n] = ' ' + stream.n++ + } +} diff --git a/vendor/github.com/json-iterator/go/feature_stream_float.go 
b/vendor/github.com/json-iterator/go/feature_stream_float.go new file mode 100644 index 00000000000..9a404e11d4a --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_stream_float.go @@ -0,0 +1,96 @@ +package jsoniter + +import ( + "math" + "strconv" +) + +var pow10 []uint64 + +func init() { + pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000} +} + +// WriteFloat32 write float32 to stream +func (stream *Stream) WriteFloat32(val float32) { + abs := math.Abs(float64(val)) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if float32(abs) < 1e-6 || float32(abs) >= 1e21 { + fmt = 'e' + } + } + stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 32)) +} + +// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster +func (stream *Stream) WriteFloat32Lossy(val float32) { + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat32(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(float64(val)*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + stream.ensure(10) + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[stream.n-1] == '0' { + stream.n-- + } +} + +// WriteFloat64 write float64 to stream +func (stream *Stream) WriteFloat64(val float64) { + abs := math.Abs(val) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 64)) +} + +// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster +func (stream *Stream) WriteFloat64Lossy(val float64) { + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat64(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(val*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + stream.ensure(10) + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[stream.n-1] == '0' { + stream.n-- + } +} diff --git a/vendor/github.com/json-iterator/go/feature_stream_int.go b/vendor/github.com/json-iterator/go/feature_stream_int.go new file mode 100644 index 00000000000..7cfd522c106 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_stream_int.go @@ -0,0 +1,320 @@ +package jsoniter + +var digits []uint32 + +func init() { + digits = make([]uint32, 1000) + for i := uint32(0); i < 1000; i++ { + digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' + if i < 10 { + digits[i] += 2 << 24 + } else if i < 100 { + digits[i] += 1 << 24 + } + } +} + +func writeFirstBuf(buf []byte, v uint32, n int) int { + start := v >> 24 + if start == 0 { + buf[n] = byte(v >> 16) + n++ + buf[n] = byte(v >> 8) + n++ + } else if start == 1 { + buf[n] = byte(v >> 8) + n++ + } + buf[n] = byte(v) + n++ + return n +} + +func writeBuf(buf []byte, v uint32, n int) { + buf[n] = byte(v >> 16) + buf[n+1] = byte(v >> 8) + buf[n+2] = byte(v) +} + +// WriteUint8 write uint8 to stream +func (stream *Stream) WriteUint8(val uint8) { + stream.ensure(3) + stream.n = 
writeFirstBuf(stream.buf, digits[val], stream.n) +} + +// WriteInt8 write int8 to stream +func (stream *Stream) WriteInt8(nval int8) { + stream.ensure(4) + n := stream.n + var val uint8 + if nval < 0 { + val = uint8(-nval) + stream.buf[n] = '-' + n++ + } else { + val = uint8(nval) + } + stream.n = writeFirstBuf(stream.buf, digits[val], n) +} + +// WriteUint16 write uint16 to stream +func (stream *Stream) WriteUint16(val uint16) { + stream.ensure(5) + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], stream.n) + return + } + r1 := val - q1*1000 + n := writeFirstBuf(stream.buf, digits[q1], stream.n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return +} + +// WriteInt16 write int16 to stream +func (stream *Stream) WriteInt16(nval int16) { + stream.ensure(6) + n := stream.n + var val uint16 + if nval < 0 { + val = uint16(-nval) + stream.buf[n] = '-' + n++ + } else { + val = uint16(nval) + } + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + n = writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return +} + +// WriteUint32 write uint32 to stream +func (stream *Stream) WriteUint32(val uint32) { + stream.ensure(10) + n := stream.n + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + n := writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + n = writeFirstBuf(stream.buf, digits[q2], n) + } else { + r3 := q2 - q3*1000 + stream.buf[n] = byte(q3 + '0') + n++ + writeBuf(stream.buf, digits[r3], n) + n += 3 + } + writeBuf(stream.buf, digits[r2], n) + writeBuf(stream.buf, digits[r1], n+3) + stream.n = n + 6 +} + +// WriteInt32 write int32 to stream +func (stream *Stream) WriteInt32(nval int32) { + stream.ensure(11) + n := stream.n + var val uint32 + if nval < 0 { + val = uint32(-nval) + stream.buf[n] = '-' + n++ + } else { + val = uint32(nval) + } + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + n := writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + n = writeFirstBuf(stream.buf, digits[q2], n) + } else { + r3 := q2 - q3*1000 + stream.buf[n] = byte(q3 + '0') + n++ + writeBuf(stream.buf, digits[r3], n) + n += 3 + } + writeBuf(stream.buf, digits[r2], n) + writeBuf(stream.buf, digits[r1], n+3) + stream.n = n + 6 +} + +// WriteUint64 write uint64 to stream +func (stream *Stream) WriteUint64(val uint64) { + stream.ensure(20) + n := stream.n + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + n := writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + n = writeFirstBuf(stream.buf, digits[q2], n) + writeBuf(stream.buf, digits[r2], n) + writeBuf(stream.buf, digits[r1], n+3) + stream.n = n + 6 + return + } + r3 := q2 - q3*1000 + q4 := q3 / 1000 + if q4 == 0 { + n = writeFirstBuf(stream.buf, digits[q3], n) + writeBuf(stream.buf, digits[r3], n) + writeBuf(stream.buf, digits[r2], n+3) + 
writeBuf(stream.buf, digits[r1], n+6) + stream.n = n + 9 + return + } + r4 := q3 - q4*1000 + q5 := q4 / 1000 + if q5 == 0 { + n = writeFirstBuf(stream.buf, digits[q4], n) + writeBuf(stream.buf, digits[r4], n) + writeBuf(stream.buf, digits[r3], n+3) + writeBuf(stream.buf, digits[r2], n+6) + writeBuf(stream.buf, digits[r1], n+9) + stream.n = n + 12 + return + } + r5 := q4 - q5*1000 + q6 := q5 / 1000 + if q6 == 0 { + n = writeFirstBuf(stream.buf, digits[q5], n) + } else { + n = writeFirstBuf(stream.buf, digits[q6], n) + r6 := q5 - q6*1000 + writeBuf(stream.buf, digits[r6], n) + n += 3 + } + writeBuf(stream.buf, digits[r5], n) + writeBuf(stream.buf, digits[r4], n+3) + writeBuf(stream.buf, digits[r3], n+6) + writeBuf(stream.buf, digits[r2], n+9) + writeBuf(stream.buf, digits[r1], n+12) + stream.n = n + 15 +} + +// WriteInt64 write int64 to stream +func (stream *Stream) WriteInt64(nval int64) { + stream.ensure(20) + n := stream.n + var val uint64 + if nval < 0 { + val = uint64(-nval) + stream.buf[n] = '-' + n++ + } else { + val = uint64(nval) + } + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + n := writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + n = writeFirstBuf(stream.buf, digits[q2], n) + writeBuf(stream.buf, digits[r2], n) + writeBuf(stream.buf, digits[r1], n+3) + stream.n = n + 6 + return + } + r3 := q2 - q3*1000 + q4 := q3 / 1000 + if q4 == 0 { + n = writeFirstBuf(stream.buf, digits[q3], n) + writeBuf(stream.buf, digits[r3], n) + writeBuf(stream.buf, digits[r2], n+3) + writeBuf(stream.buf, digits[r1], n+6) + stream.n = n + 9 + return + } + r4 := q3 - q4*1000 + q5 := q4 / 1000 + if q5 == 0 { + n = writeFirstBuf(stream.buf, digits[q4], n) + writeBuf(stream.buf, digits[r4], n) + writeBuf(stream.buf, digits[r3], n+3) + writeBuf(stream.buf, digits[r2], n+6) + writeBuf(stream.buf, digits[r1], n+9) + stream.n = n + 12 + return + } + r5 := q4 - q5*1000 + q6 := q5 / 1000 + if q6 == 0 { + n = writeFirstBuf(stream.buf, digits[q5], n) + } else { + stream.buf[n] = byte(q6 + '0') + n++ + r6 := q5 - q6*1000 + writeBuf(stream.buf, digits[r6], n) + n += 3 + } + writeBuf(stream.buf, digits[r5], n) + writeBuf(stream.buf, digits[r4], n+3) + writeBuf(stream.buf, digits[r3], n+6) + writeBuf(stream.buf, digits[r2], n+9) + writeBuf(stream.buf, digits[r1], n+12) + stream.n = n + 15 +} + +// WriteInt write int to stream +func (stream *Stream) WriteInt(val int) { + stream.WriteInt64(int64(val)) +} + +// WriteUint write uint to stream +func (stream *Stream) WriteUint(val uint) { + stream.WriteUint64(uint64(val)) +} diff --git a/vendor/github.com/json-iterator/go/feature_stream_string.go b/vendor/github.com/json-iterator/go/feature_stream_string.go new file mode 100644 index 00000000000..334282f05fa --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_stream_string.go @@ -0,0 +1,396 @@ +package jsoniter + +import ( + "unicode/utf8" +) + +// htmlSafeSet holds the value true if the ASCII character with the given +// array position can be safely represented inside a JSON string, embedded +// inside of HTML